summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--babeld/babel_interface.c2
-rw-r--r--babeld/message.c2
-rw-r--r--bfdd/bfd.c173
-rw-r--r--bfdd/bfd_packet.c22
-rw-r--r--bfdd/bfdd.c4
-rw-r--r--bfdd/dplane.c4
-rw-r--r--bfdd/ptm_adapter.c38
-rw-r--r--bgpd/bgp_aspath.h4
-rw-r--r--bgpd/bgp_attr.c47
-rw-r--r--bgpd/bgp_attr_evpn.c10
-rw-r--r--bgpd/bgp_bmp.c15
-rw-r--r--bgpd/bgp_bmp.h2
-rw-r--r--bgpd/bgp_clist.c80
-rw-r--r--bgpd/bgp_community.c2
-rw-r--r--bgpd/bgp_community.h1
-rw-r--r--bgpd/bgp_dump.c42
-rw-r--r--bgpd/bgp_dump.h28
-rw-r--r--bgpd/bgp_ecommunity.c25
-rw-r--r--bgpd/bgp_ecommunity.h5
-rw-r--r--bgpd/bgp_evpn.c20
-rw-r--r--bgpd/bgp_evpn_mh.c27
-rw-r--r--bgpd/bgp_evpn_mh.h9
-rw-r--r--bgpd/bgp_evpn_private.h8
-rw-r--r--bgpd/bgp_evpn_vty.c63
-rw-r--r--bgpd/bgp_flowspec_util.c2
-rw-r--r--bgpd/bgp_fsm.c19
-rw-r--r--bgpd/bgp_main.c5
-rw-r--r--bgpd/bgp_mplsvpn.c11
-rw-r--r--bgpd/bgp_mplsvpn.h15
-rw-r--r--bgpd/bgp_mplsvpn_snmp.c1689
-rw-r--r--bgpd/bgp_mplsvpn_snmp.h31
-rw-r--r--bgpd/bgp_nb.c1750
-rw-r--r--bgpd/bgp_nb.h1000
-rw-r--r--bgpd/bgp_nb_config.c9007
-rw-r--r--bgpd/bgp_nexthop.c48
-rw-r--r--bgpd/bgp_nexthop.h5
-rw-r--r--bgpd/bgp_nht.c31
-rw-r--r--bgpd/bgp_nht.h5
-rw-r--r--bgpd/bgp_packet.c1
-rw-r--r--bgpd/bgp_pbr.c15
-rw-r--r--bgpd/bgp_route.c428
-rw-r--r--bgpd/bgp_route.h2
-rw-r--r--bgpd/bgp_routemap.c249
-rw-r--r--bgpd/bgp_script.c192
-rw-r--r--bgpd/bgp_script.h34
-rw-r--r--bgpd/bgp_snmp.c7
-rw-r--r--bgpd/bgp_updgrp.c3
-rw-r--r--bgpd/bgp_updgrp.h6
-rw-r--r--bgpd/bgp_updgrp_adv.c36
-rw-r--r--bgpd/bgp_updgrp_packet.c38
-rw-r--r--bgpd/bgp_vty.c420
-rw-r--r--bgpd/bgp_vty.h9
-rw-r--r--bgpd/bgp_zebra.c21
-rw-r--r--bgpd/bgpd.c42
-rw-r--r--bgpd/bgpd.h83
-rw-r--r--bgpd/rfapi/rfapi.c17
-rw-r--r--bgpd/rfapi/rfapi_import.c8
-rw-r--r--bgpd/rfapi/rfapi_private.h3
-rw-r--r--bgpd/rfapi/vnc_export_bgp.c8
-rw-r--r--bgpd/subdir.am5
-rwxr-xr-xconfigure.ac90
-rw-r--r--debian/control3
-rwxr-xr-xdebian/rules7
-rw-r--r--doc/developer/building.rst1
-rw-r--r--doc/developer/cross-compiling.rst326
-rw-r--r--doc/developer/library.rst3
-rw-r--r--doc/developer/lua.rst65
-rw-r--r--doc/developer/ospf-sr.rst66
-rw-r--r--doc/developer/scripting.rst433
-rw-r--r--doc/developer/subdir.am4
-rw-r--r--doc/developer/topotests-markers.rst114
-rw-r--r--doc/developer/topotests.rst29
-rw-r--r--doc/developer/tracing.rst25
-rw-r--r--doc/developer/xrefs.rst170
-rw-r--r--doc/user/bgp.rst102
-rw-r--r--doc/user/fabricd.rst15
-rw-r--r--doc/user/index.rst1
-rw-r--r--doc/user/installation.rst8
-rw-r--r--doc/user/isisd.rst13
-rw-r--r--doc/user/nhrpd.rst13
-rw-r--r--doc/user/ospf6d.rst58
-rw-r--r--doc/user/ospfd.rst14
-rw-r--r--doc/user/pbr.rst15
-rw-r--r--doc/user/pim.rst7
-rw-r--r--doc/user/scripting.rst28
-rw-r--r--doc/user/setup.rst50
-rw-r--r--doc/user/subdir.am1
-rw-r--r--doc/user/zebra.rst14
-rw-r--r--docker/alpine/Dockerfile13
-rw-r--r--docker/centos-7/Dockerfile6
-rw-r--r--docker/centos-8/Dockerfile14
-rw-r--r--eigrpd/eigrp_filter.c63
-rw-r--r--eigrpd/eigrp_interface.c18
-rw-r--r--eigrpd/eigrp_structs.h2
-rw-r--r--eigrpd/eigrp_topology.c1
-rw-r--r--include/linux/if_bridge.h11
-rw-r--r--include/linux/neighbour.h25
-rw-r--r--isisd/isis_adjacency.h1
-rw-r--r--isisd/isis_bfd.c53
-rw-r--r--isisd/isis_bpf.c6
-rw-r--r--isisd/isis_circuit.c6
-rw-r--r--isisd/isis_circuit.h3
-rw-r--r--isisd/isis_cli.c53
-rw-r--r--isisd/isis_constants.h3
-rw-r--r--isisd/isis_dlpi.c5
-rw-r--r--isisd/isis_dr.c3
-rw-r--r--isisd/isis_ldp_sync.c1
-rw-r--r--isisd/isis_lsp.c145
-rw-r--r--isisd/isis_nb.c15
-rw-r--r--isisd/isis_nb.h8
-rw-r--r--isisd/isis_nb_config.c32
-rw-r--r--isisd/isis_pdu.c6
-rw-r--r--isisd/isis_spf.c28
-rw-r--r--isisd/isisd.c40
-rw-r--r--isisd/isisd.h8
-rw-r--r--lib/agentx.c44
-rw-r--r--lib/bfd.c22
-rw-r--r--lib/buffer.c10
-rw-r--r--lib/clippy.c3
-rw-r--r--lib/command.c55
-rw-r--r--lib/command.h33
-rw-r--r--lib/command_graph.h2
-rw-r--r--lib/command_parse.y2
-rw-r--r--lib/compiler.h23
-rw-r--r--lib/filter_cli.c24
-rw-r--r--lib/frr_zmq.c58
-rw-r--r--lib/frr_zmq.h42
-rw-r--r--lib/frrlua.c387
-rw-r--r--lib/frrlua.h216
-rw-r--r--lib/frrscript.c272
-rw-r--r--lib/frrscript.h138
-rw-r--r--lib/hash.c8
-rw-r--r--lib/hash.h2
-rw-r--r--lib/if.c176
-rw-r--r--lib/if.h2
-rw-r--r--lib/lib_vty.c6
-rw-r--r--lib/libfrr.c39
-rw-r--r--lib/libfrr.h5
-rw-r--r--lib/link_state.c15
-rw-r--r--lib/log.c13
-rw-r--r--lib/log.h10
-rw-r--r--lib/module.c2
-rw-r--r--lib/module.h8
-rw-r--r--lib/network.c18
-rw-r--r--lib/network.h18
-rw-r--r--lib/northbound.h2
-rw-r--r--lib/northbound_cli.c26
-rw-r--r--lib/printf/printf-pos.c4
-rw-r--r--lib/printf/vfprintf.c6
-rw-r--r--lib/privs.c6
-rw-r--r--lib/privs.h2
-rw-r--r--lib/resolver.c3
-rwxr-xr-xlib/route_types.pl2
-rw-r--r--lib/routemap.c5
-rw-r--r--lib/sigevent.c2
-rw-r--r--lib/smux.h53
-rw-r--r--lib/snmp.c54
-rw-r--r--lib/stream.c4
-rw-r--r--lib/subdir.am5
-rw-r--r--lib/thread.c137
-rw-r--r--lib/thread.h116
-rw-r--r--lib/vrf.c54
-rw-r--r--lib/vrf.h15
-rw-r--r--lib/vty.c125
-rw-r--r--lib/wheel.c3
-rw-r--r--lib/workqueue.c5
-rw-r--r--lib/xref.c130
-rw-r--r--lib/xref.h272
-rw-r--r--lib/zclient.c22
-rw-r--r--lib/zclient.h4
-rw-r--r--lib/zlog.c22
-rw-r--r--lib/zlog.h79
-rw-r--r--nhrpd/nhrp_main.c9
-rw-r--r--nhrpd/nhrp_route.c26
-rw-r--r--nhrpd/nhrp_shortcut.c16
-rw-r--r--ospf6d/ospf6_abr.c119
-rw-r--r--ospf6d/ospf6_asbr.c94
-rw-r--r--ospf6d/ospf6_flood.c12
-rw-r--r--ospf6d/ospf6_interface.c22
-rw-r--r--ospf6d/ospf6_interface.h1
-rw-r--r--ospf6d/ospf6_intra.c205
-rw-r--r--ospf6d/ospf6_lsa.c238
-rw-r--r--ospf6d/ospf6_lsa.h20
-rw-r--r--ospf6d/ospf6_lsdb.c49
-rw-r--r--ospf6d/ospf6_lsdb.h3
-rw-r--r--ospf6d/ospf6_route.c381
-rw-r--r--ospf6d/ospf6_route.h13
-rw-r--r--ospf6d/ospf6_spf.c7
-rw-r--r--ospf6d/ospf6_spf.h1
-rw-r--r--ospf6d/ospf6_top.c170
-rw-r--r--ospf6d/ospf6_top.h5
-rw-r--r--ospf6d/ospf6_zebra.c9
-rw-r--r--ospf6d/ospf6d.c770
-rw-r--r--ospfclient/ospf_apiclient.c3
-rw-r--r--ospfd/ospf_asbr.c6
-rw-r--r--ospfd/ospf_ase.c78
-rw-r--r--ospfd/ospf_bfd.c6
-rw-r--r--ospfd/ospf_dump.c30
-rw-r--r--ospfd/ospf_dump.h4
-rw-r--r--ospfd/ospf_flood.c53
-rw-r--r--ospfd/ospf_ldp_sync.c1
-rw-r--r--ospfd/ospf_lsa.c60
-rw-r--r--ospfd/ospf_lsa.h6
-rw-r--r--ospfd/ospf_memory.c2
-rw-r--r--ospfd/ospf_memory.h2
-rw-r--r--ospfd/ospf_packet.c10
-rw-r--r--ospfd/ospf_ri.c7
-rw-r--r--ospfd/ospf_route.c83
-rw-r--r--ospfd/ospf_route.h5
-rw-r--r--ospfd/ospf_snmp.c23
-rw-r--r--ospfd/ospf_spf.c658
-rw-r--r--ospfd/ospf_spf.h25
-rw-r--r--ospfd/ospf_sr.c112
-rw-r--r--ospfd/ospf_sr.h13
-rw-r--r--ospfd/ospf_ti_lfa.c1114
-rw-r--r--ospfd/ospf_ti_lfa.h41
-rw-r--r--ospfd/ospf_vty.c175
-rw-r--r--ospfd/ospf_vty.h4
-rw-r--r--ospfd/ospf_zebra.c141
-rw-r--r--ospfd/ospfd.c105
-rw-r--r--ospfd/ospfd.h90
-rw-r--r--ospfd/subdir.am2
-rw-r--r--pbrd/pbr_map.c8
-rw-r--r--pbrd/pbr_nht.c80
-rw-r--r--pbrd/pbr_zebra.c8
-rw-r--r--pimd/pim_bfd.c6
-rw-r--r--pimd/pim_cmd.c79
-rw-r--r--pimd/pim_hello.c16
-rw-r--r--pimd/pim_ifchannel.c33
-rw-r--r--pimd/pim_instance.c8
-rw-r--r--pimd/pim_jp_agg.c26
-rw-r--r--pimd/pim_mroute.c7
-rw-r--r--pimd/pim_msdp_socket.c4
-rw-r--r--pimd/pim_nb_config.c3
-rw-r--r--pimd/pim_rp.c2
-rw-r--r--pimd/pim_vxlan.c22
-rw-r--r--pimd/test_igmpv3_join.c5
-rw-r--r--ripd/rip_interface.c33
-rw-r--r--ripd/rip_snmp.c11
-rw-r--r--ripd/ripd.c17
-rw-r--r--ripngd/ripngd.c140
-rw-r--r--ripngd/ripngd.h36
-rw-r--r--sharpd/sharp_zebra.c32
-rw-r--r--staticd/static_nb.c2
-rw-r--r--staticd/static_nb.h4
-rw-r--r--staticd/static_nb_config.c126
-rw-r--r--tests/.gitignore1
-rw-r--r--tests/lib/test_xref.c140
-rw-r--r--tests/lib/test_xref.py6
-rw-r--r--tests/ospfd/.gitignore3
-rw-r--r--tests/ospfd/common.c248
-rw-r--r--tests/ospfd/common.h47
-rw-r--r--tests/ospfd/test_ospf_spf.c303
-rw-r--r--tests/ospfd/test_ospf_spf.in10
-rw-r--r--tests/ospfd/test_ospf_spf.py4
-rw-r--r--tests/ospfd/test_ospf_spf.refout130
-rw-r--r--tests/ospfd/topologies.c575
-rw-r--r--tests/subdir.am25
-rw-r--r--tests/topotests/all-protocol-startup/r1/ip_nht.ref12
-rw-r--r--tests/topotests/all-protocol-startup/test_all_protocol_startup.py52
-rw-r--r--tests/topotests/bfd-ospf-topo1/__init__.py0
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt1/bfdd.conf9
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt1/ospf6d.conf21
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt1/ospfd.conf26
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt1/step1/show_ip_route.ref74
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt1/step1/show_ipv6_route.ref70
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt1/step2/show_bfd_peers.ref26
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_healthy.ref28
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_rt2_down.ref15
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_rt3_down.ref15
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_healthy.ref74
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_rt2_down.ref74
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_rt3_down.ref74
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_healthy.ref70
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_rt2_down.ref70
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_rt3_down.ref70
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt1/zebra.conf25
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt2/bfdd.conf7
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt2/ospf6d.conf19
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt2/ospfd.conf24
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt2/step2/show_bfd_peers.ref14
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt2/zebra.conf22
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt3/bfdd.conf7
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt3/ospf6d.conf19
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt3/ospfd.conf24
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt3/step2/show_bfd_peers.ref14
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt3/zebra.conf22
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt4/bfdd.conf5
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt4/ospf6d.conf18
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt4/ospfd.conf23
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt4/zebra.conf22
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt5/bfdd.conf5
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt5/ospf6d.conf18
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt5/ospfd.conf23
-rw-r--r--tests/topotests/bfd-ospf-topo1/rt5/zebra.conf22
-rwxr-xr-xtests/topotests/bfd-ospf-topo1/test_bfd_ospf_topo1.py307
-rw-r--r--tests/topotests/bgp-evpn-mh/test_evpn_mh.py4
-rw-r--r--tests/topotests/bgp-evpn-vxlan_topo1/PE1/bgpd.conf1
-rw-r--r--tests/topotests/bgp-evpn-vxlan_topo1/PE1/evpn.vni.json4
-rw-r--r--tests/topotests/bgp-evpn-vxlan_topo1/PE2/bgpd.conf1
-rw-r--r--tests/topotests/bgp-evpn-vxlan_topo1/PE2/evpn.vni.json4
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/ce1/bgpd.conf12
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/ce1/snmpd.conf15
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/ce1/zebra.conf19
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/ce2/bgpd.conf12
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/ce2/snmpd.conf15
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/ce2/zebra.conf19
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/ce3/bgpd.conf12
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/ce3/snmpd.conf15
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/ce3/zebra.conf19
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/ce4/bgpd.conf12
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/ce4/snmpd.conf15
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/ce4/zebra.conf19
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/r1/bgpd.conf48
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/r1/isisd.conf46
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/r1/snmpd.conf17
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/r1/zebra.conf33
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/r2/isisd.conf37
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/r2/snmpd.conf15
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/r2/zebra.conf24
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/r3/isisd.conf45
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/r3/snmpd.conf15
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/r3/zebra.conf27
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/r4/bgpd.conf43
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/r4/isisd.conf36
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/r4/snmpd.conf15
-rw-r--r--tests/topotests/bgp-snmp-mplsl3vpn/r4/zebra.conf27
-rwxr-xr-xtests/topotests/bgp-snmp-mplsl3vpn/test_bgp_snmp_mplsvpn.py738
-rw-r--r--tests/topotests/bgp_blackhole_community/__init__.py0
-rw-r--r--tests/topotests/bgp_blackhole_community/r1/bgpd.conf14
-rw-r--r--tests/topotests/bgp_blackhole_community/r1/zebra.conf10
-rw-r--r--tests/topotests/bgp_blackhole_community/r2/bgpd.conf8
-rw-r--r--tests/topotests/bgp_blackhole_community/r2/zebra.conf12
-rw-r--r--tests/topotests/bgp_blackhole_community/r3/bgpd.conf6
-rw-r--r--tests/topotests/bgp_blackhole_community/r3/zebra.conf6
-rw-r--r--tests/topotests/bgp_blackhole_community/r4/bgpd.conf6
-rw-r--r--tests/topotests/bgp_blackhole_community/r4/zebra.conf6
-rw-r--r--tests/topotests/bgp_blackhole_community/test_bgp_blackhole_community.py156
-rw-r--r--tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py4
-rw-r--r--tests/topotests/example-test/test_template.py12
-rwxr-xr-xtests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py13
-rwxr-xr-xtests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py13
-rwxr-xr-xtests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py13
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/__init__.py0
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt1/isisd.conf27
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_ip_route.ref89
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_ipv6_route.ref65
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_yang_interface_isis_adjacencies.ref32
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt1/step2/show_ip_route.ref82
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt1/step2/show_ipv6_route.ref59
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt1/step3/show_ip_route.ref62
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt1/step3/show_ipv6_route.ref40
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt1/step4/show_ip_route.ref89
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt1/step4/show_ipv6_route.ref65
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt1/zebra.conf19
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt2/isisd.conf35
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_ip_route.ref77
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_ipv6_route.ref40
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_yang_interface_isis_adjacencies.ref58
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt2/zebra.conf22
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt3/isisd.conf35
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_ip_route.ref97
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_ipv6_route.ref59
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_yang_interface_isis_adjacencies.ref51
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt3/zebra.conf22
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt4/isisd.conf42
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_ip_route.ref94
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_ipv6_route.ref21
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_yang_interface_isis_adjacencies.ref63
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt4/zebra.conf25
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt5/isisd.conf42
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_ip_route.ref118
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_ipv6_route.ref40
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_yang_interface_isis_adjacencies.ref63
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt5/zebra.conf25
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt6/isisd.conf32
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_ip_route.ref107
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_ipv6_route.ref46
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_yang_interface_isis_adjacencies.ref44
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt6/step2/show_ip_route.ref100
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt6/step2/show_ipv6_route.ref40
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt6/step3/show_ip_route.ref80
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt6/step3/show_ipv6_route.ref21
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt6/step4/show_ip_route.ref107
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt6/step4/show_ipv6_route.ref46
-rw-r--r--tests/topotests/isis-lsp-bits-topo1/rt6/zebra.conf22
-rwxr-xr-xtests/topotests/isis-lsp-bits-topo1/test_isis_lsp_bits_topo1.py353
-rw-r--r--tests/topotests/lib/common_config.py100
-rw-r--r--tests/topotests/lib/ospf.py187
-rw-r--r--tests/topotests/lib/pim.py161
-rwxr-xr-xtests/topotests/lib/send_bsr_packet.py58
-rw-r--r--tests/topotests/lib/snmptest.py152
-rw-r--r--tests/topotests/lib/topogen.py6
-rw-r--r--tests/topotests/lib/topojson.py3
-rw-r--r--tests/topotests/lib/topotest.py21
-rw-r--r--tests/topotests/multicast-pim-bsm-topo1/mcast_pim_bsmp_01.json238
-rw-r--r--tests/topotests/multicast-pim-bsm-topo1/test_mcast_pim_bsmp_01.py1653
-rw-r--r--tests/topotests/multicast-pim-bsm-topo2/mcast_pim_bsmp_02.json238
-rw-r--r--tests/topotests/multicast-pim-bsm-topo2/test_mcast_pim_bsmp_02.py1115
-rw-r--r--tests/topotests/multicast-pim-sm-topo1/multicast_pim_sm_topo1.json140
-rwxr-xr-xtests/topotests/multicast-pim-sm-topo1/test_multicast_pim_sm_topo1.py1698
-rw-r--r--tests/topotests/multicast-pim-sm-topo2/multicast_pim_sm_topo2.json140
-rwxr-xr-xtests/topotests/multicast-pim-sm-topo2/test_multicast_pim_sm_topo2.py1947
-rw-r--r--tests/topotests/multicast-pim-sm-topo3/multicast_pim_sm_topo3.json140
-rw-r--r--tests/topotests/multicast-pim-sm-topo3/multicast_pim_sm_topo4.json137
-rwxr-xr-xtests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo3.py4609
-rwxr-xr-xtests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo4.py1122
-rw-r--r--tests/topotests/multicast-pim-static-rp-topo1/__init__.py0
-rw-r--r--tests/topotests/multicast-pim-static-rp-topo1/multicast_pim_static_rp.json93
-rwxr-xr-xtests/topotests/multicast-pim-static-rp-topo1/test_multicast_pim_static_rp.py3810
-rw-r--r--tests/topotests/ospf-dual-stack/test_ospf_dual_stack.dot107
-rw-r--r--tests/topotests/ospf-dual-stack/test_ospf_dual_stack.jpgbin0 -> 98314 bytes
-rw-r--r--tests/topotests/ospf-dual-stack/test_ospf_dual_stack.json255
-rw-r--r--tests/topotests/ospf-dual-stack/test_ospf_dual_stack.py152
-rw-r--r--tests/topotests/ospf-tilfa-topo1/__init__.py0
-rw-r--r--tests/topotests/ospf-tilfa-topo1/rt1/ospfd.conf27
-rw-r--r--tests/topotests/ospf-tilfa-topo1/rt1/step1/show_ip_route_initial.ref156
-rw-r--r--tests/topotests/ospf-tilfa-topo1/rt1/step2/show_ip_route_initial.ref156
-rw-r--r--tests/topotests/ospf-tilfa-topo1/rt1/step2/show_ip_route_link_protection.ref226
-rw-r--r--tests/topotests/ospf-tilfa-topo1/rt1/step3/show_ip_route_initial.ref156
-rw-r--r--tests/topotests/ospf-tilfa-topo1/rt1/step3/show_ip_route_node_protection.ref192
-rw-r--r--tests/topotests/ospf-tilfa-topo1/rt1/zebra.conf17
-rw-r--r--tests/topotests/ospf-tilfa-topo1/rt2/ospfd.conf27
-rw-r--r--tests/topotests/ospf-tilfa-topo1/rt2/zebra.conf17
-rw-r--r--tests/topotests/ospf-tilfa-topo1/rt3/ospfd.conf27
-rw-r--r--tests/topotests/ospf-tilfa-topo1/rt3/zebra.conf17
-rw-r--r--tests/topotests/ospf-tilfa-topo1/rt4/ospfd.conf27
-rw-r--r--tests/topotests/ospf-tilfa-topo1/rt4/zebra.conf17
-rw-r--r--tests/topotests/ospf-tilfa-topo1/rt5/ospfd.conf27
-rw-r--r--tests/topotests/ospf-tilfa-topo1/rt5/zebra.conf17
-rw-r--r--tests/topotests/ospf-tilfa-topo1/test_ospf_tilfa_topo1.py242
-rw-r--r--tests/topotests/ospf6-topo1/r2/ip_6_address.nhg.ref10
-rw-r--r--tests/topotests/ospf6-topo1/r3/ip_6_address.nhg.ref10
-rw-r--r--tests/topotests/ospf6-topo1/r4/ip_6_address.nhg.ref10
-rw-r--r--tests/topotests/ospf6-topo1/test_ospf6_topo1.py8
-rw-r--r--tests/topotests/ospf_basic_functionality/ospf_chaos.json166
-rw-r--r--tests/topotests/ospf_basic_functionality/test_ospf_chaos.py576
-rw-r--r--tests/topotests/pim-basic/test_pim.py4
-rw-r--r--tests/topotests/pytest.ini31
-rw-r--r--tests/topotests/simple-snmp-test/r1/bgpd.conf6
-rw-r--r--tests/topotests/simple-snmp-test/r1/isisd.conf46
-rw-r--r--tests/topotests/simple-snmp-test/r1/snmpd.conf15
-rw-r--r--tests/topotests/simple-snmp-test/r1/zebra.conf22
-rwxr-xr-xtests/topotests/simple-snmp-test/test_simple_snmp.py152
-rw-r--r--tests/topotests/zebra_rib/r1/iproute.ref512
-rw-r--r--tests/topotests/zebra_rib/r1/sharp_rmap.ref17
-rw-r--r--tests/topotests/zebra_rib/r1/static_rmap.ref9
-rw-r--r--tests/topotests/zebra_rib/test_zebra_rib.py111
-rw-r--r--tools/.gitignore2
-rwxr-xr-xtools/checkpatch.pl4
-rwxr-xr-xtools/frr-reload.py28
-rw-r--r--tools/frr.service.in (renamed from tools/frr.service)7
-rw-r--r--tools/frr@.service.in (renamed from tools/frr@.service)7
-rw-r--r--vrrpd/vrrp.c2
-rw-r--r--vrrpd/vrrp_arp.c2
-rw-r--r--vtysh/vtysh.c16
-rw-r--r--watchfrr/watchfrr.c5
-rw-r--r--yang/frr-bgp.yang52
-rw-r--r--yang/frr-isisd.yang15
-rw-r--r--yang/frr-nexthop.yang6
-rw-r--r--zebra/debug.c38
-rw-r--r--zebra/debug.h5
-rw-r--r--zebra/dplane_fpm_nl.c24
-rw-r--r--zebra/if_netlink.c8
-rw-r--r--zebra/interface.c12
-rw-r--r--zebra/irdp_packet.c6
-rw-r--r--zebra/kernel_netlink.c38
-rw-r--r--zebra/kernel_netlink.h3
-rw-r--r--zebra/redistribute.c4
-rw-r--r--zebra/rib.h3
-rw-r--r--zebra/rt_netlink.c66
-rw-r--r--zebra/zapi_msg.c49
-rw-r--r--zebra/zapi_msg.h3
-rw-r--r--zebra/zebra_evpn_mac.c33
-rw-r--r--zebra/zebra_evpn_mac.h3
-rw-r--r--zebra/zebra_fpm.c10
-rw-r--r--zebra/zebra_nhg.c36
-rw-r--r--zebra/zebra_nhg.h7
-rw-r--r--zebra/zebra_pbr.c25
-rw-r--r--zebra/zebra_rib.c54
-rw-r--r--zebra/zebra_rnh.c2
-rw-r--r--zebra/zebra_routemap.c65
-rw-r--r--zebra/zebra_routemap.h2
-rw-r--r--zebra/zebra_vxlan.c88
-rw-r--r--zebra/zserv.c13
485 files changed, 51600 insertions, 4359 deletions
diff --git a/babeld/babel_interface.c b/babeld/babel_interface.c
index d37e0b608f..85d79bdc3b 100644
--- a/babeld/babel_interface.c
+++ b/babeld/babel_interface.c
@@ -693,7 +693,7 @@ interface_recalculate(struct interface *ifp)
rc = resize_receive_buffer(mtu);
if(rc < 0)
- zlog_warn("couldn't resize receive buffer for interface %s (%d) (%d bytes).\n",
+ zlog_warn("couldn't resize receive buffer for interface %s (%d) (%d bytes).",
ifp->name, ifp->ifindex, mtu);
memset(&mreq, 0, sizeof(mreq));
diff --git a/babeld/message.c b/babeld/message.c
index c9a10cb1c0..edb9806011 100644
--- a/babeld/message.c
+++ b/babeld/message.c
@@ -643,7 +643,7 @@ parse_packet(const unsigned char *from, struct interface *ifp,
return;
rtt = MAX(0, local_waiting_us - remote_waiting_us);
- debugf(BABEL_DEBUG_COMMON, "RTT to %s on %s sample result: %d us.\n",
+ debugf(BABEL_DEBUG_COMMON, "RTT to %s on %s sample result: %d us.",
format_address(from), ifp->name, rtt);
old_rttcost = neighbour_rttcost(neigh);
diff --git a/bfdd/bfd.c b/bfdd/bfd.c
index 9667ba8708..3e45bf0e04 100644
--- a/bfdd/bfd.c
+++ b/bfdd/bfd.c
@@ -313,6 +313,13 @@ int bfd_session_enable(struct bfd_session *bs)
}
}
+ if (!vrf_is_backend_netns() && vrf && vrf->vrf_id != VRF_DEFAULT
+ && !if_lookup_by_name(vrf->name, vrf->vrf_id)) {
+ zlog_err("session-enable: vrf interface %s not available yet",
+ vrf->name);
+ return 0;
+ }
+
if (bs->key.ifname[0]) {
if (vrf)
ifp = if_lookup_by_name(bs->key.ifname, vrf->vrf_id);
@@ -320,14 +327,16 @@ int bfd_session_enable(struct bfd_session *bs)
ifp = if_lookup_by_name_all_vrf(bs->key.ifname);
if (ifp == NULL) {
zlog_err(
- "session-enable: specified interface doesn't exists.");
+ "session-enable: specified interface %s (VRF %s) doesn't exist.",
+ bs->key.ifname, vrf ? vrf->name : "<all>");
return 0;
}
if (bs->key.ifname[0] && !vrf) {
vrf = vrf_lookup_by_id(ifp->vrf_id);
if (vrf == NULL) {
zlog_err(
- "session-enable: specified VRF doesn't exists.");
+ "session-enable: specified VRF %u doesn't exist.",
+ ifp->vrf_id);
return 0;
}
}
@@ -1663,15 +1672,54 @@ static bool bfd_id_hash_cmp(const void *n1, const void *n2)
static unsigned int bfd_key_hash_do(const void *p)
{
const struct bfd_session *bs = p;
+ struct bfd_key key = bs->key;
- return jhash(&bs->key, sizeof(bs->key), 0);
+ /*
+ * Local address and interface name are optional and
+ * can be filled any time after session creation.
+ * Hash key should not depend on these fields.
+ */
+ memset(&key.local, 0, sizeof(key.local));
+ memset(key.ifname, 0, sizeof(key.ifname));
+
+ return jhash(&key, sizeof(key), 0);
}
static bool bfd_key_hash_cmp(const void *n1, const void *n2)
{
const struct bfd_session *bs1 = n1, *bs2 = n2;
- return memcmp(&bs1->key, &bs2->key, sizeof(bs1->key)) == 0;
+ if (bs1->key.family != bs2->key.family)
+ return false;
+ if (bs1->key.mhop != bs2->key.mhop)
+ return false;
+ if (memcmp(&bs1->key.peer, &bs2->key.peer, sizeof(bs1->key.peer)))
+ return false;
+ if (memcmp(bs1->key.vrfname, bs2->key.vrfname,
+ sizeof(bs1->key.vrfname)))
+ return false;
+
+ /*
+ * Local address is optional and can be empty.
+ * If both addresses are not empty and different,
+ * then the keys are different.
+ */
+ if (memcmp(&bs1->key.local, &zero_addr, sizeof(bs1->key.local))
+ && memcmp(&bs2->key.local, &zero_addr, sizeof(bs2->key.local))
+ && memcmp(&bs1->key.local, &bs2->key.local, sizeof(bs1->key.local)))
+ return false;
+
+ /*
+ * Interface name is optional and can be empty.
+ * If both names are not empty and different,
+ * then the keys are different.
+ */
+ if (bs1->key.ifname[0] && bs2->key.ifname[0]
+ && memcmp(bs1->key.ifname, bs2->key.ifname,
+ sizeof(bs1->key.ifname)))
+ return false;
+
+ return true;
}
@@ -1689,117 +1737,13 @@ struct bfd_session *bfd_id_lookup(uint32_t id)
return hash_lookup(bfd_id_hash, &bs);
}
-struct bfd_key_walk_partial_lookup {
- struct bfd_session *given;
- struct bfd_session *result;
-};
-
-/* ignore some parameters */
-static int bfd_key_lookup_ignore_partial_walker(struct hash_bucket *b,
- void *data)
-{
- struct bfd_key_walk_partial_lookup *ctx =
- (struct bfd_key_walk_partial_lookup *)data;
- struct bfd_session *given = ctx->given;
- struct bfd_session *parsed = b->data;
-
- if (given->key.family != parsed->key.family)
- return HASHWALK_CONTINUE;
- if (given->key.mhop != parsed->key.mhop)
- return HASHWALK_CONTINUE;
- if (memcmp(&given->key.peer, &parsed->key.peer,
- sizeof(struct in6_addr)))
- return HASHWALK_CONTINUE;
- if (memcmp(given->key.vrfname, parsed->key.vrfname, MAXNAMELEN))
- return HASHWALK_CONTINUE;
- ctx->result = parsed;
- /* ignore localaddr or interface */
- return HASHWALK_ABORT;
-}
-
struct bfd_session *bfd_key_lookup(struct bfd_key key)
{
- struct bfd_session bs, *bsp;
- struct bfd_key_walk_partial_lookup ctx;
- char peer_buf[INET6_ADDRSTRLEN];
-
- bs.key = key;
- bsp = hash_lookup(bfd_key_hash, &bs);
- if (bsp)
- return bsp;
-
- inet_ntop(bs.key.family, &bs.key.peer, peer_buf,
- sizeof(peer_buf));
- /* Handle cases where local-address is optional. */
- if (memcmp(&bs.key.local, &zero_addr, sizeof(bs.key.local))) {
- memset(&bs.key.local, 0, sizeof(bs.key.local));
- bsp = hash_lookup(bfd_key_hash, &bs);
- if (bsp) {
- if (bglobal.debug_peer_event) {
- char addr_buf[INET6_ADDRSTRLEN];
- inet_ntop(bs.key.family, &key.local, addr_buf,
- sizeof(addr_buf));
- zlog_debug(
- " peer %s found, but loc-addr %s ignored",
- peer_buf, addr_buf);
- }
- return bsp;
- }
- }
-
- bs.key = key;
- /* Handle cases where ifname is optional. */
- if (bs.key.ifname[0]) {
- memset(bs.key.ifname, 0, sizeof(bs.key.ifname));
- bsp = hash_lookup(bfd_key_hash, &bs);
- if (bsp) {
- if (bglobal.debug_peer_event)
- zlog_debug(" peer %s found, but ifp %s ignored",
- peer_buf, key.ifname);
- return bsp;
- }
- }
+ struct bfd_session bs;
- /* Handle cases where local-address and ifname are optional. */
- if (bs.key.family == AF_INET) {
- memset(&bs.key.local, 0, sizeof(bs.key.local));
- bsp = hash_lookup(bfd_key_hash, &bs);
- if (bsp) {
- if (bglobal.debug_peer_event) {
- char addr_buf[INET6_ADDRSTRLEN];
- inet_ntop(bs.key.family, &bs.key.local,
- addr_buf, sizeof(addr_buf));
- zlog_debug(
- " peer %s found, but ifp %s and loc-addr %s ignored",
- peer_buf, key.ifname, addr_buf);
- }
- return bsp;
- }
- }
bs.key = key;
- /* Handle case where a context more complex ctx is present.
- * input has no iface nor local-address, but a context may
- * exist.
- *
- * Only applies to IPv4, because IPv6 requires either
- * local-address or interface.
- */
- if (!bs.key.mhop && bs.key.family == AF_INET) {
- ctx.result = NULL;
- ctx.given = &bs;
- hash_walk(bfd_key_hash, &bfd_key_lookup_ignore_partial_walker,
- &ctx);
- /* change key */
- if (ctx.result) {
- bsp = ctx.result;
- if (bglobal.debug_peer_event)
- zlog_debug(
- " peer %s found, but ifp and/or loc-addr params ignored",
- peer_buf);
- }
- }
- return bsp;
+ return hash_lookup(bfd_key_hash, &bs);
}
/*
@@ -1823,16 +1767,11 @@ struct bfd_session *bfd_id_delete(uint32_t id)
struct bfd_session *bfd_key_delete(struct bfd_key key)
{
- struct bfd_session bs, *bsp;
+ struct bfd_session bs;
bs.key = key;
- bsp = hash_lookup(bfd_key_hash, &bs);
- if (bsp == NULL && key.ifname[0]) {
- memset(bs.key.ifname, 0, sizeof(bs.key.ifname));
- bsp = hash_lookup(bfd_key_hash, &bs);
- }
- return hash_release(bfd_key_hash, bsp);
+ return hash_release(bfd_key_hash, &bs);
}
/* Iteration functions. */
diff --git a/bfdd/bfd_packet.c b/bfdd/bfd_packet.c
index 0a71c18a42..12bb52cf67 100644
--- a/bfdd/bfd_packet.c
+++ b/bfdd/bfd_packet.c
@@ -165,7 +165,7 @@ void ptm_bfd_echo_snd(struct bfd_session *bfd)
salen = sizeof(sin6);
} else {
sd = bvrf->bg_echo;
- memset(&sin6, 0, sizeof(sin6));
+ memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
memcpy(&sin.sin_addr, &bfd->key.peer, sizeof(sin.sin_addr));
sin.sin_port = htons(BFD_DEF_ECHO_PORT);
@@ -543,6 +543,7 @@ int bfd_recv_cb(struct thread *t)
ifindex_t ifindex = IFINDEX_INTERNAL;
struct sockaddr_any local, peer;
uint8_t msgbuf[1516];
+ struct interface *ifp = NULL;
struct bfd_vrf_global *bvrf = THREAD_ARG(t);
vrfid = bvrf->vrf->vrf_id;
@@ -572,6 +573,15 @@ int bfd_recv_cb(struct thread *t)
&local, &peer);
}
+ /* update vrf-id because when in vrf-lite mode,
+ * the socket is on default namespace
+ */
+ if (ifindex) {
+ ifp = if_lookup_by_index(ifindex, vrfid);
+ if (ifp)
+ vrfid = ifp->vrf_id;
+ }
+
/* Implement RFC 5880 6.8.6 */
if (mlen < BFD_PKT_LEN) {
cp_debug(is_mhop, &peer, &local, ifindex, vrfid,
@@ -951,8 +961,9 @@ int bp_peer_socket(const struct bfd_session *bs)
if (bs->key.ifname[0])
device_to_bind = (const char *)bs->key.ifname;
- else if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)
- && bs->key.vrfname[0])
+ else if ((!vrf_is_backend_netns() && bs->vrf->vrf_id != VRF_DEFAULT)
+ || ((CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)
+ && bs->key.vrfname[0])))
device_to_bind = (const char *)bs->key.vrfname;
frr_with_privs(&bglobal.bfdd_privs) {
@@ -1018,8 +1029,9 @@ int bp_peer_socketv6(const struct bfd_session *bs)
if (bs->key.ifname[0])
device_to_bind = (const char *)bs->key.ifname;
- else if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)
- && bs->key.vrfname[0])
+ else if ((!vrf_is_backend_netns() && bs->vrf->vrf_id != VRF_DEFAULT)
+ || ((CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)
+ && bs->key.vrfname[0])))
device_to_bind = (const char *)bs->key.vrfname;
frr_with_privs(&bglobal.bfdd_privs) {
diff --git a/bfdd/bfdd.c b/bfdd/bfdd.c
index b8a059708f..0cac990108 100644
--- a/bfdd/bfdd.c
+++ b/bfdd/bfdd.c
@@ -371,10 +371,6 @@ int main(int argc, char *argv[])
snprintf(ctl_path, sizeof(ctl_path), BFDD_CONTROL_SOCKET,
"/", bfdd_di.pathspace);
-#if 0 /* TODO add support for JSON configuration files. */
- parse_config(conf);
-#endif
-
/* Initialize FRR infrastructure. */
master = frr_init();
diff --git a/bfdd/dplane.c b/bfdd/dplane.c
index b8f0aadd93..66b79f3b13 100644
--- a/bfdd/dplane.c
+++ b/bfdd/dplane.c
@@ -169,8 +169,8 @@ static void bfd_dplane_debug_message(const struct bfddp_message *msg)
&msg->data.session.dst);
else
snprintfrr(addrs, sizeof(addrs), "src=%pI4 dst=%pI4",
- &msg->data.session.src,
- &msg->data.session.dst);
+ (struct in_addr *)&msg->data.session.src,
+ (struct in_addr *)&msg->data.session.dst);
buf[0] = 0;
if (flags & SESSION_CBIT)
diff --git a/bfdd/ptm_adapter.c b/bfdd/ptm_adapter.c
index 44519c47b5..0c70600f20 100644
--- a/bfdd/ptm_adapter.c
+++ b/bfdd/ptm_adapter.c
@@ -669,17 +669,24 @@ static void bfdd_sessions_enable_interface(struct interface *ifp)
struct bfd_session *bs;
struct vrf *vrf;
+ vrf = vrf_lookup_by_id(ifp->vrf_id);
+ if (!vrf)
+ return;
+
TAILQ_FOREACH(bso, &bglobal.bg_obslist, bso_entry) {
bs = bso->bso_bs;
- /* Interface name mismatch. */
- if (strcmp(ifp->name, bs->key.ifname))
- continue;
- vrf = vrf_lookup_by_id(ifp->vrf_id);
- if (!vrf)
- continue;
+ /* check vrf name */
if (bs->key.vrfname[0] &&
strcmp(vrf->name, bs->key.vrfname))
continue;
+
+ /* If Interface matches vrfname, then bypass iface check */
+ if (vrf_is_backend_netns() || strcmp(ifp->name, vrf->name)) {
+ /* Interface name mismatch. */
+ if (strcmp(ifp->name, bs->key.ifname))
+ continue;
+ }
+
/* Skip enabled sessions. */
if (bs->sock != -1)
continue;
@@ -696,11 +703,15 @@ static void bfdd_sessions_disable_interface(struct interface *ifp)
TAILQ_FOREACH(bso, &bglobal.bg_obslist, bso_entry) {
bs = bso->bso_bs;
- if (strcmp(ifp->name, bs->key.ifname))
+
+ if (bs->ifp != ifp)
continue;
+
/* Skip disabled sessions. */
- if (bs->sock == -1)
+ if (bs->sock == -1) {
+ bs->ifp = NULL;
continue;
+ }
bfd_session_disable(bs);
bs->ifp = NULL;
@@ -759,7 +770,8 @@ void bfdd_sessions_disable_vrf(struct vrf *vrf)
static int bfd_ifp_destroy(struct interface *ifp)
{
if (bglobal.debug_zebra)
- zlog_debug("zclient: delete interface %s", ifp->name);
+ zlog_debug("zclient: delete interface %s (VRF %u)", ifp->name,
+ ifp->vrf_id);
bfdd_sessions_disable_interface(ifp);
@@ -812,10 +824,10 @@ static int bfdd_interface_address_update(ZAPI_CALLBACK_ARGS)
return 0;
if (bglobal.debug_zebra)
- zlog_debug("zclient: %s local address %pFX",
+ zlog_debug("zclient: %s local address %pFX (VRF %u)",
cmd == ZEBRA_INTERFACE_ADDRESS_ADD ? "add"
: "delete",
- ifc->address);
+ ifc->address, vrf_id);
if (cmd == ZEBRA_INTERFACE_ADDRESS_ADD)
bfdd_sessions_enable_address(ifc);
@@ -828,8 +840,8 @@ static int bfdd_interface_address_update(ZAPI_CALLBACK_ARGS)
static int bfd_ifp_create(struct interface *ifp)
{
if (bglobal.debug_zebra)
- zlog_debug("zclient: add interface %s", ifp->name);
-
+ zlog_debug("zclient: add interface %s (VRF %u)", ifp->name,
+ ifp->vrf_id);
bfdd_sessions_enable_interface(ifp);
return 0;
diff --git a/bgpd/bgp_aspath.h b/bgpd/bgp_aspath.h
index 9df352fcd6..4b16818167 100644
--- a/bgpd/bgp_aspath.h
+++ b/bgpd/bgp_aspath.h
@@ -32,7 +32,7 @@
/* Private AS range defined in RFC2270. */
#define BGP_PRIVATE_AS_MIN 64512U
-#define BGP_PRIVATE_AS_MAX 65535U
+#define BGP_PRIVATE_AS_MAX UINT16_MAX
/* Private 4 byte AS range defined in RFC6996. */
#define BGP_PRIVATE_AS4_MIN 4200000000U
@@ -40,7 +40,7 @@
/* we leave BGP_AS_MAX as the 16bit AS MAX number. */
#define BGP_AS_ZERO 0
-#define BGP_AS_MAX 65535U
+#define BGP_AS_MAX UINT16_MAX
#define BGP_AS4_MAX 4294967295U
/* Transition 16Bit AS as defined by IANA */
#define BGP_AS_TRANS 23456U
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index ce22e8404d..dc8cc81042 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -1753,14 +1753,22 @@ static int bgp_attr_aggregator(struct bgp_attr_parser_args *args)
attr->aggregator_as = aggregator_as;
attr->aggregator_addr.s_addr = stream_get_ipv4(peer->curr);
- /* Set atomic aggregate flag. */
- attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR);
-
/* Codification of AS 0 Processing */
- if (aggregator_as == BGP_AS_ZERO)
+ if (aggregator_as == BGP_AS_ZERO) {
flog_err(EC_BGP_ATTR_LEN,
- "AGGREGATOR AS number is 0 for aspath: %s",
- aspath_print(attr->aspath));
+ "%s: AGGREGATOR AS number is 0 for aspath: %s",
+ peer->host, aspath_print(attr->aspath));
+
+ if (bgp_debug_update(peer, NULL, NULL, 1)) {
+ char attr_str[BUFSIZ] = {0};
+
+ bgp_dump_attr(attr, attr_str, sizeof(attr_str));
+
+ zlog_debug("%s: attributes: %s", __func__, attr_str);
+ }
+ } else {
+ attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR);
+ }
return BGP_ATTR_PARSE_PROCEED;
}
@@ -1784,16 +1792,26 @@ bgp_attr_as4_aggregator(struct bgp_attr_parser_args *args,
}
aggregator_as = stream_getl(peer->curr);
+
*as4_aggregator_as = aggregator_as;
as4_aggregator_addr->s_addr = stream_get_ipv4(peer->curr);
- attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR);
-
/* Codification of AS 0 Processing */
- if (aggregator_as == BGP_AS_ZERO)
+ if (aggregator_as == BGP_AS_ZERO) {
flog_err(EC_BGP_ATTR_LEN,
- "AS4_AGGREGATOR AS number is 0 for aspath: %s",
- aspath_print(attr->aspath));
+ "%s: AS4_AGGREGATOR AS number is 0 for aspath: %s",
+ peer->host, aspath_print(attr->aspath));
+
+ if (bgp_debug_update(peer, NULL, NULL, 1)) {
+ char attr_str[BUFSIZ] = {0};
+
+ bgp_dump_attr(attr, attr_str, sizeof(attr_str));
+
+ zlog_debug("%s: attributes: %s", __func__, attr_str);
+ }
+ } else {
+ attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR);
+ }
return BGP_ATTR_PARSE_PROCEED;
}
@@ -3395,7 +3413,8 @@ void bgp_attr_extcom_tunnel_type(struct attr *attr,
bgp_encap_types *tunnel_type)
{
struct ecommunity *ecom;
- int i;
+ uint32_t i;
+
if (!attr)
return;
@@ -3900,7 +3919,7 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer,
/* Is ASN representable in 2-bytes? Or must AS_TRANS be
* used? */
- if (attr->aggregator_as > 65535) {
+ if (attr->aggregator_as > UINT16_MAX) {
stream_putw(s, BGP_AS_TRANS);
/* we have to send AS4_AGGREGATOR, too.
@@ -4021,7 +4040,7 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer,
uint8_t *pnt;
int tbit;
int ecom_tr_size = 0;
- int i;
+ uint32_t i;
for (i = 0; i < attr->ecommunity->size; i++) {
pnt = attr->ecommunity->val + (i * 8);
diff --git a/bgpd/bgp_attr_evpn.c b/bgpd/bgp_attr_evpn.c
index 7cc9ecd79e..1df646c346 100644
--- a/bgpd/bgp_attr_evpn.c
+++ b/bgpd/bgp_attr_evpn.c
@@ -89,7 +89,7 @@ char *ecom_mac2str(char *ecom_mac)
/* Fetch router-mac from extended community */
bool bgp_attr_rmac(struct attr *attr, struct ethaddr *rmac)
{
- int i = 0;
+ uint32_t i = 0;
struct ecommunity *ecom;
ecom = attr->ecommunity;
@@ -122,7 +122,7 @@ bool bgp_attr_rmac(struct attr *attr, struct ethaddr *rmac)
uint8_t bgp_attr_default_gw(struct attr *attr)
{
struct ecommunity *ecom;
- int i;
+ uint32_t i;
ecom = attr->ecommunity;
if (!ecom || !ecom->size)
@@ -153,7 +153,7 @@ uint8_t bgp_attr_default_gw(struct attr *attr)
uint16_t bgp_attr_df_pref_from_ec(struct attr *attr, uint8_t *alg)
{
struct ecommunity *ecom;
- int i;
+ uint32_t i;
uint16_t df_pref = 0;
*alg = EVPN_MH_DF_ALG_SERVICE_CARVING;
@@ -190,7 +190,7 @@ uint16_t bgp_attr_df_pref_from_ec(struct attr *attr, uint8_t *alg)
uint32_t bgp_attr_mac_mobility_seqnum(struct attr *attr, uint8_t *sticky)
{
struct ecommunity *ecom;
- int i;
+ uint32_t i;
uint8_t flags = 0;
ecom = attr->ecommunity;
@@ -237,7 +237,7 @@ void bgp_attr_evpn_na_flag(struct attr *attr,
uint8_t *router_flag, bool *proxy)
{
struct ecommunity *ecom;
- int i;
+ uint32_t i;
uint8_t val;
ecom = attr->ecommunity;
diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c
index 82e27884cf..c93713668f 100644
--- a/bgpd/bgp_bmp.c
+++ b/bgpd/bgp_bmp.c
@@ -434,10 +434,19 @@ static struct stream *bmp_peerstate(struct peer *peer, bool down)
case PEER_DOWN_CLOSE_SESSION:
type = BMP_PEERDOWN_REMOTE_CLOSE;
break;
+ case PEER_DOWN_WAITING_NHT:
+ type = BMP_PEERDOWN_LOCAL_FSM;
+ stream_putw(s, BGP_FSM_TcpConnectionFails);
+ break;
+ /*
+ * TODO: Map remaining PEER_DOWN_* reasons to RFC event codes.
+ * TODO: Implement BMP_PEERDOWN_LOCAL_NOTIFY.
+ *
+ * See RFC7854 ss. 4.9
+ */
default:
- type = BMP_PEERDOWN_LOCAL_NOTIFY;
- stream_put(s, peer->last_reset_cause,
- peer->last_reset_cause_size);
+ type = BMP_PEERDOWN_LOCAL_FSM;
+ stream_putw(s, BMP_PEER_DOWN_NO_RELEVANT_EVENT_CODE);
break;
}
stream_putc_at(s, type_pos, type);
diff --git a/bgpd/bgp_bmp.h b/bgpd/bgp_bmp.h
index d6b22d0cbc..2c3ba570ee 100644
--- a/bgpd/bgp_bmp.h
+++ b/bgpd/bgp_bmp.h
@@ -269,6 +269,8 @@ struct bmp_bgp_peer {
/* per struct bgp * data */
PREDECL_HASH(bmp_bgph)
+#define BMP_PEER_DOWN_NO_RELEVANT_EVENT_CODE 0x00
+
struct bmp_bgp {
struct bmp_bgph_item bbi;
diff --git a/bgpd/bgp_clist.c b/bgpd/bgp_clist.c
index 6ac6cf56dd..e17cce3ff6 100644
--- a/bgpd/bgp_clist.c
+++ b/bgpd/bgp_clist.c
@@ -654,86 +654,6 @@ static bool ecommunity_regexp_match(struct ecommunity *ecom, regex_t *reg)
return false;
}
-#if 0
-/* Delete community attribute using regular expression match. Return
- modified communites attribute. */
-static struct community *
-community_regexp_delete (struct community *com, regex_t * reg)
-{
- int i;
- uint32_t comval;
- /* Maximum is "65535:65535" + '\0'. */
- char c[12];
- const char *str;
-
- if (!com)
- return NULL;
-
- i = 0;
- while (i < com->size)
- {
- memcpy (&comval, com_nthval (com, i), sizeof(uint32_t));
- comval = ntohl (comval);
-
- switch (comval) {
- case COMMUNITY_INTERNET:
- str = "internet";
- break;
- case COMMUNITY_ACCEPT_OWN:
- str = "accept-own";
- break;
- case COMMUNITY_ROUTE_FILTER_TRANSLATED_v4:
- str = "route-filter-translated-v4";
- break;
- case COMMUNITY_ROUTE_FILTER_v4:
- str = "route-filter-v4";
- break;
- case COMMUNITY_ROUTE_FILTER_TRANSLATED_v6:
- str = "route-filter-translated-v6";
- break;
- case COMMUNITY_ROUTE_FILTER_v6:
- str = "route-filter-v6";
- break;
- case COMMUNITY_LLGR_STALE:
- str = "llgr-stale";
- break;
- case COMMUNITY_NO_LLGR:
- str = "no-llgr";
- break;
- case COMMUNITY_ACCEPT_OWN_NEXTHOP:
- str = "accept-own-nexthop";
- break;
- case COMMUNITY_BLACKHOLE:
- str = "blackhole";
- break;
- case COMMUNITY_NO_EXPORT:
- str = "no-export";
- break;
- case COMMUNITY_NO_ADVERTISE:
- str = "no-advertise";
- break;
- case COMMUNITY_LOCAL_AS:
- str = "local-AS";
- break;
- case COMMUNITY_NO_PEER:
- str = "no-peer";
- break;
- default:
- sprintf (c, "%d:%d", (comval >> 16) & 0xFFFF,
- comval & 0xFFFF);
- str = c;
- break;
- }
-
- if (regexec (reg, str, 0, NULL, 0) == 0)
- community_del_val (com, com_nthval (com, i));
- else
- i++;
- }
- return com;
-}
-#endif
-
/* When given community attribute matches to the community-list return
1 else return 0. */
bool community_list_match(struct community *com, struct community_list *list)
diff --git a/bgpd/bgp_community.c b/bgpd/bgp_community.c
index f722a8dbc7..43138b82f6 100644
--- a/bgpd/bgp_community.c
+++ b/bgpd/bgp_community.c
@@ -56,7 +56,7 @@ void community_free(struct community **com)
}
/* Add one community value to the community. */
-static void community_add_val(struct community *com, uint32_t val)
+void community_add_val(struct community *com, uint32_t val)
{
com->size++;
if (com->val)
diff --git a/bgpd/bgp_community.h b/bgpd/bgp_community.h
index b99f38ab64..2a1fbf526a 100644
--- a/bgpd/bgp_community.h
+++ b/bgpd/bgp_community.h
@@ -88,6 +88,7 @@ extern struct community *community_delete(struct community *,
struct community *);
extern struct community *community_dup(struct community *);
extern bool community_include(struct community *, uint32_t);
+extern void community_add_val(struct community *com, uint32_t val);
extern void community_del_val(struct community *, uint32_t *);
extern unsigned long community_count(void);
extern struct hash *community_hash(void);
diff --git a/bgpd/bgp_dump.c b/bgpd/bgp_dump.c
index 975bba9314..944a5848ec 100644
--- a/bgpd/bgp_dump.c
+++ b/bgpd/bgp_dump.c
@@ -300,6 +300,13 @@ static void bgp_dump_routes_index_table(struct bgp *bgp)
fflush(bgp_dump_routes.fp);
}
+static int bgp_addpath_encode_rx(struct peer *peer, afi_t afi, safi_t safi)
+{
+
+ return (CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_RX_ADV)
+ && CHECK_FLAG(peer->af_cap[afi][safi],
+ PEER_CAP_ADDPATH_AF_TX_RCV));
+}
static struct bgp_path_info *
bgp_dump_route_node_record(int afi, struct bgp_dest *dest,
@@ -308,16 +315,27 @@ bgp_dump_route_node_record(int afi, struct bgp_dest *dest,
struct stream *obuf;
size_t sizep;
size_t endp;
+ int addpath_encoded;
const struct prefix *p = bgp_dest_get_prefix(dest);
obuf = bgp_dump_obuf;
stream_reset(obuf);
+ addpath_encoded = bgp_addpath_encode_rx(path->peer, afi, SAFI_UNICAST);
+
/* MRT header */
- if (afi == AFI_IP)
+ if (afi == AFI_IP && addpath_encoded)
+ bgp_dump_header(obuf, MSG_TABLE_DUMP_V2,
+ TABLE_DUMP_V2_RIB_IPV4_UNICAST_ADDPATH,
+ BGP_DUMP_ROUTES);
+ else if (afi == AFI_IP)
bgp_dump_header(obuf, MSG_TABLE_DUMP_V2,
TABLE_DUMP_V2_RIB_IPV4_UNICAST,
BGP_DUMP_ROUTES);
+ else if (afi == AFI_IP6 && addpath_encoded)
+ bgp_dump_header(obuf, MSG_TABLE_DUMP_V2,
+ TABLE_DUMP_V2_RIB_IPV6_UNICAST_ADDPATH,
+ BGP_DUMP_ROUTES);
else if (afi == AFI_IP6)
bgp_dump_header(obuf, MSG_TABLE_DUMP_V2,
TABLE_DUMP_V2_RIB_IPV6_UNICAST,
@@ -361,6 +379,11 @@ bgp_dump_route_node_record(int afi, struct bgp_dest *dest,
/* Originated */
stream_putl(obuf, time(NULL) - (bgp_clock() - path->uptime));
+ /* Path Identifier */
+ if (addpath_encoded) {
+ stream_putl(obuf, path->addpath_rx_id);
+ }
+
/* Dump attribute. */
/* Skip prefix & AFI/SAFI for MP_NLRI */
bgp_dump_routes_attr(obuf, path->attr, p);
@@ -528,19 +551,32 @@ static void bgp_dump_packet_func(struct bgp_dump *bgp_dump, struct peer *peer,
struct stream *packet)
{
struct stream *obuf;
-
+ int addpath_encoded = 0;
/* If dump file pointer is disabled return immediately. */
if (bgp_dump->fp == NULL)
return;
+ if (peer->su.sa.sa_family == AF_INET) {
+ addpath_encoded =
+ bgp_addpath_encode_rx(peer, AFI_IP, SAFI_UNICAST);
+ } else if (peer->su.sa.sa_family == AF_INET6) {
+ addpath_encoded =
+ bgp_addpath_encode_rx(peer, AFI_IP6, SAFI_UNICAST);
+ }
/* Make dump stream. */
obuf = bgp_dump_obuf;
stream_reset(obuf);
/* Dump header and common part. */
- if (CHECK_FLAG(peer->cap, PEER_CAP_AS4_RCV)) {
+ if (CHECK_FLAG(peer->cap, PEER_CAP_AS4_RCV) && addpath_encoded) {
+ bgp_dump_header(obuf, MSG_PROTOCOL_BGP4MP,
+ BGP4MP_MESSAGE_AS4_ADDPATH, bgp_dump->type);
+ } else if (CHECK_FLAG(peer->cap, PEER_CAP_AS4_RCV)) {
bgp_dump_header(obuf, MSG_PROTOCOL_BGP4MP, BGP4MP_MESSAGE_AS4,
bgp_dump->type);
+ } else if (addpath_encoded) {
+ bgp_dump_header(obuf, MSG_PROTOCOL_BGP4MP,
+ BGP4MP_MESSAGE_ADDPATH, bgp_dump->type);
} else {
bgp_dump_header(obuf, MSG_PROTOCOL_BGP4MP, BGP4MP_MESSAGE,
bgp_dump->type);
diff --git a/bgpd/bgp_dump.h b/bgpd/bgp_dump.h
index 86c80d481c..a8cbd8ed2c 100644
--- a/bgpd/bgp_dump.h
+++ b/bgpd/bgp_dump.h
@@ -27,22 +27,30 @@
#define MSG_PROTOCOL_BGP4MP_ET 17
/* subtype value */
-#define BGP4MP_STATE_CHANGE 0
-#define BGP4MP_MESSAGE 1
-#define BGP4MP_ENTRY 2
-#define BGP4MP_SNAPSHOT 3
-#define BGP4MP_MESSAGE_AS4 4
-#define BGP4MP_STATE_CHANGE_AS4 5
+#define BGP4MP_STATE_CHANGE 0
+#define BGP4MP_MESSAGE 1
+#define BGP4MP_ENTRY 2
+#define BGP4MP_SNAPSHOT 3
+#define BGP4MP_MESSAGE_AS4 4
+#define BGP4MP_STATE_CHANGE_AS4 5
+#define BGP4MP_MESSAGE_ADDPATH 8
+#define BGP4MP_MESSAGE_AS4_ADDPATH 9
+#define BGP4MP_MESSAGE_LOCAL_ADDPATH 10
+#define BGP4MP_MESSAGE_AS4_LOCAL_ADDPATH 11
#define BGP_DUMP_HEADER_SIZE 12
#define BGP_DUMP_MSG_HEADER 40
-#define TABLE_DUMP_V2_PEER_INDEX_TABLE 1
-#define TABLE_DUMP_V2_RIB_IPV4_UNICAST 2
+#define TABLE_DUMP_V2_PEER_INDEX_TABLE 1
+#define TABLE_DUMP_V2_RIB_IPV4_UNICAST 2
#define TABLE_DUMP_V2_RIB_IPV4_MULTICAST 3
-#define TABLE_DUMP_V2_RIB_IPV6_UNICAST 4
+#define TABLE_DUMP_V2_RIB_IPV6_UNICAST 4
#define TABLE_DUMP_V2_RIB_IPV6_MULTICAST 5
-#define TABLE_DUMP_V2_RIB_GENERIC 6
+#define TABLE_DUMP_V2_RIB_IPV4_UNICAST_ADDPATH 8
+#define TABLE_DUMP_V2_RIB_IPV4_MULTICAST_ADDPATH 9
+#define TABLE_DUMP_V2_RIB_IPV6_UNICAST_ADDPATH 10
+#define TABLE_DUMP_V2_RIB_IPV6_MULTICAST_ADDPATH 11
+#define TABLE_DUMP_V2_RIB_GENERIC_ADDPATH 12
#define TABLE_DUMP_V2_PEER_INDEX_TABLE_IP 0
#define TABLE_DUMP_V2_PEER_INDEX_TABLE_IP6 1
diff --git a/bgpd/bgp_ecommunity.c b/bgpd/bgp_ecommunity.c
index 43bfb3e2bc..7f6f61e141 100644
--- a/bgpd/bgp_ecommunity.c
+++ b/bgpd/bgp_ecommunity.c
@@ -95,7 +95,7 @@ static bool ecommunity_add_val_internal(struct ecommunity *ecom,
bool unique, bool overwrite,
uint8_t ecom_size)
{
- int c, ins_idx;
+ uint32_t c, ins_idx;
const struct ecommunity_val *eval4 = (struct ecommunity_val *)eval;
const struct ecommunity_val_ipv6 *eval6 =
(struct ecommunity_val_ipv6 *)eval;
@@ -113,7 +113,7 @@ static bool ecommunity_add_val_internal(struct ecommunity *ecom,
/* check also if the extended community itself exists. */
c = 0;
- ins_idx = -1;
+ ins_idx = UINT32_MAX;
for (uint8_t *p = ecom->val; c < ecom->size;
p += ecom_size, c++) {
if (unique) {
@@ -145,12 +145,12 @@ static bool ecommunity_add_val_internal(struct ecommunity *ecom,
if (ret > 0) {
if (!unique)
break;
- if (ins_idx == -1)
+ if (ins_idx == UINT32_MAX)
ins_idx = c;
}
}
- if (ins_idx == -1)
+ if (ins_idx == UINT32_MAX)
ins_idx = c;
/* Add the value to the structure with numerical sorting. */
@@ -193,7 +193,7 @@ static struct ecommunity *
ecommunity_uniq_sort_internal(struct ecommunity *ecom,
unsigned short ecom_size)
{
- int i;
+ uint32_t i;
struct ecommunity *new;
const void *eval;
@@ -895,11 +895,10 @@ static int ecommunity_lb_str(char *buf, size_t bufsz, const uint8_t *pnt)
*/
char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter)
{
- int i;
+ uint32_t i;
uint8_t *pnt;
uint8_t type = 0;
uint8_t sub_type = 0;
-#define ECOMMUNITY_STRLEN 64
int str_size;
char *str_buf;
@@ -1176,8 +1175,8 @@ char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter)
bool ecommunity_match(const struct ecommunity *ecom1,
const struct ecommunity *ecom2)
{
- int i = 0;
- int j = 0;
+ uint32_t i = 0;
+ uint32_t j = 0;
if (ecom1 == NULL && ecom2 == NULL)
return true;
@@ -1209,7 +1208,7 @@ extern struct ecommunity_val *ecommunity_lookup(const struct ecommunity *ecom,
uint8_t type, uint8_t subtype)
{
uint8_t *p;
- int c;
+ uint32_t c;
/* If the value already exists in the structure return 0. */
c = 0;
@@ -1230,7 +1229,7 @@ bool ecommunity_strip(struct ecommunity *ecom, uint8_t type,
uint8_t subtype)
{
uint8_t *p, *q, *new;
- int c, found = 0;
+ uint32_t c, found = 0;
/* When this is fist value, just add it. */
if (ecom == NULL || ecom->val == NULL)
return false;
@@ -1278,7 +1277,7 @@ bool ecommunity_strip(struct ecommunity *ecom, uint8_t type,
bool ecommunity_del_val(struct ecommunity *ecom, struct ecommunity_val *eval)
{
uint8_t *p;
- int c, found = 0;
+ uint32_t c, found = 0;
/* Make sure specified value exists. */
if (ecom == NULL || ecom->val == NULL)
@@ -1512,7 +1511,7 @@ void bgp_remove_ecomm_from_aggregate_hash(struct bgp_aggregate *aggregate,
const uint8_t *ecommunity_linkbw_present(struct ecommunity *ecom, uint32_t *bw)
{
const uint8_t *eval;
- int i;
+ uint32_t i;
if (bw)
*bw = 0;
diff --git a/bgpd/bgp_ecommunity.h b/bgpd/bgp_ecommunity.h
index 6318e7edb1..03b23fcd37 100644
--- a/bgpd/bgp_ecommunity.h
+++ b/bgpd/bgp_ecommunity.h
@@ -103,6 +103,9 @@
/* Extended Communities type flag. */
#define ECOMMUNITY_FLAG_NON_TRANSITIVE 0x40
+/* Extended Community readable string length */
+#define ECOMMUNITY_STRLEN 64
+
/* Extended Communities attribute. */
struct ecommunity {
/* Reference counter. */
@@ -114,7 +117,7 @@ struct ecommunity {
uint8_t unit_size;
/* Size of Extended Communities attribute. */
- int size;
+ uint32_t size;
/* Extended Communities value. */
uint8_t *val;
diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c
index 96f4b0aa78..c976632678 100644
--- a/bgpd/bgp_evpn.c
+++ b/bgpd/bgp_evpn.c
@@ -870,7 +870,7 @@ static void add_mac_mobility_to_attr(uint32_t seq_num, struct attr *attr)
struct ecommunity ecom_tmp;
struct ecommunity_val eval;
uint8_t *ecom_val_ptr;
- int i;
+ uint32_t i;
uint8_t *pnt;
int type = 0;
int sub_type = 0;
@@ -2710,7 +2710,7 @@ static int is_route_matching_for_vrf(struct bgp *bgp_vrf,
{
struct attr *attr = pi->attr;
struct ecommunity *ecom;
- int i;
+ uint32_t i;
assert(attr);
/* Route should have valid RT to be even considered. */
@@ -2777,7 +2777,7 @@ static int is_route_matching_for_vni(struct bgp *bgp, struct bgpevpn *vpn,
{
struct attr *attr = pi->attr;
struct ecommunity *ecom;
- int i;
+ uint32_t i;
assert(attr);
/* Route should have valid RT to be even considered. */
@@ -3260,7 +3260,7 @@ static int bgp_evpn_install_uninstall_table(struct bgp *bgp, afi_t afi,
struct prefix_evpn *evp = (struct prefix_evpn *)p;
struct attr *attr = pi->attr;
struct ecommunity *ecom;
- int i;
+ uint32_t i;
struct prefix_evpn ad_evp;
assert(attr);
@@ -4906,7 +4906,7 @@ int bgp_nlri_parse_evpn(struct peer *peer, struct attr *attr,
*/
void bgp_evpn_map_vrf_to_its_rts(struct bgp *bgp_vrf)
{
- int i = 0;
+ uint32_t i = 0;
struct ecommunity_val *eval = NULL;
struct listnode *node = NULL, *nnode = NULL;
struct ecommunity *ecom = NULL;
@@ -4926,7 +4926,7 @@ void bgp_evpn_map_vrf_to_its_rts(struct bgp *bgp_vrf)
*/
void bgp_evpn_unmap_vrf_from_its_rts(struct bgp *bgp_vrf)
{
- int i;
+ uint32_t i;
struct ecommunity_val *eval;
struct listnode *node, *nnode;
struct ecommunity *ecom;
@@ -4963,7 +4963,7 @@ void bgp_evpn_unmap_vrf_from_its_rts(struct bgp *bgp_vrf)
*/
void bgp_evpn_map_vni_to_its_rts(struct bgp *bgp, struct bgpevpn *vpn)
{
- int i;
+ uint32_t i;
struct ecommunity_val *eval;
struct listnode *node, *nnode;
struct ecommunity *ecom;
@@ -4983,7 +4983,7 @@ void bgp_evpn_map_vni_to_its_rts(struct bgp *bgp, struct bgpevpn *vpn)
*/
void bgp_evpn_unmap_vni_from_its_rts(struct bgp *bgp, struct bgpevpn *vpn)
{
- int i;
+ uint32_t i;
struct ecommunity_val *eval;
struct listnode *node, *nnode;
struct ecommunity *ecom;
@@ -5370,11 +5370,11 @@ int bgp_evpn_local_l3vni_add(vni_t l3vni, vrf_id_t vrf_id,
switch (ret) {
case BGP_ERR_AS_MISMATCH:
flog_err(EC_BGP_EVPN_AS_MISMATCH,
- "BGP is already running; AS is %u\n", as);
+ "BGP is already running; AS is %u", as);
return -1;
case BGP_ERR_INSTANCE_MISMATCH:
flog_err(EC_BGP_EVPN_INSTANCE_MISMATCH,
- "BGP instance name and AS number mismatch\n");
+ "BGP instance name and AS number mismatch");
return -1;
}
diff --git a/bgpd/bgp_evpn_mh.c b/bgpd/bgp_evpn_mh.c
index b5678af91a..2dec0863c0 100644
--- a/bgpd/bgp_evpn_mh.c
+++ b/bgpd/bgp_evpn_mh.c
@@ -991,6 +991,10 @@ static void bgp_evpn_local_type1_evi_route_add(struct bgp *bgp,
struct prefix_evpn p;
struct bgp_evpn_es_evi *es_evi;
+ /* EAD-per-EVI routes have been suppressed */
+ if (!bgp_mh_info->ead_evi_tx)
+ return;
+
if (CHECK_FLAG(es->flags, BGP_EVPNES_ADV_EVI))
/* EAD-EVI route add for this ES is already done */
return;
@@ -2718,14 +2722,20 @@ static void bgp_evpn_es_evi_vtep_re_eval_active(struct bgp *bgp,
{
bool old_active;
bool new_active;
+ uint32_t ead_activity_flags;
old_active = !!CHECK_FLAG(evi_vtep->flags, BGP_EVPN_EVI_VTEP_ACTIVE);
- /* Both EAD-per-ES and EAD-per-EVI routes must be rxed from a PE
- * before it can be activated.
- */
- if ((evi_vtep->flags & BGP_EVPN_EVI_VTEP_EAD) ==
- BGP_EVPN_EVI_VTEP_EAD)
+ if (bgp_mh_info->ead_evi_rx)
+ /* Both EAD-per-ES and EAD-per-EVI routes must be rxed from a PE
+ * before it can be activated.
+ */
+ ead_activity_flags = BGP_EVPN_EVI_VTEP_EAD;
+ else
+ /* EAD-per-ES is sufficient to activate the PE */
+ ead_activity_flags = BGP_EVPN_EVI_VTEP_EAD_PER_ES;
+
+ if ((evi_vtep->flags & ead_activity_flags) == ead_activity_flags)
SET_FLAG(evi_vtep->flags, BGP_EVPN_EVI_VTEP_ACTIVE);
else
UNSET_FLAG(evi_vtep->flags, BGP_EVPN_EVI_VTEP_ACTIVE);
@@ -3076,9 +3086,9 @@ int bgp_evpn_local_es_evi_add(struct bgp *bgp, esi_t *esi, vni_t vni)
bgp_evpn_es_evi_local_info_set(es_evi);
/* generate an EAD-EVI for this new VNI */
- build_evpn_type1_prefix(&p, BGP_EVPN_AD_EVI_ETH_TAG,
- &es->esi, es->originator_ip);
if (CHECK_FLAG(es->flags, BGP_EVPNES_ADV_EVI)) {
+ build_evpn_type1_prefix(&p, BGP_EVPN_AD_EVI_ETH_TAG, &es->esi,
+ es->originator_ip);
if (bgp_evpn_type1_route_update(bgp, es, vpn, &p))
flog_err(EC_BGP_EVPN_ROUTE_CREATE,
"%u: EAD-EVI route creation failure for ESI %s VNI %u",
@@ -3718,6 +3728,9 @@ void bgp_evpn_mh_init(void)
bgp_mh_info->pend_es_list = list_new();
listset_app_node_mem(bgp_mh_info->pend_es_list);
+ bgp_mh_info->ead_evi_rx = BGP_EVPN_MH_EAD_EVI_RX_DEF;
+ bgp_mh_info->ead_evi_tx = BGP_EVPN_MH_EAD_EVI_TX_DEF;
+
/* config knobs - XXX add cli to control it */
bgp_mh_info->ead_evi_adv_for_down_links = true;
bgp_mh_info->consistency_checking = true;
diff --git a/bgpd/bgp_evpn_mh.h b/bgpd/bgp_evpn_mh.h
index b6d2a7faac..6199113e87 100644
--- a/bgpd/bgp_evpn_mh.h
+++ b/bgpd/bgp_evpn_mh.h
@@ -261,6 +261,15 @@ struct bgp_evpn_mh_info {
/* Use L3 NHGs for host routes in symmetric IRB */
bool install_l3nhg;
bool host_routes_use_l3nhg;
+ /* Some vendors are not generating the EAD-per-EVI route. This knob
+ * can be turned off to activate a remote ES-PE when the EAD-per-ES
+ * route is rxed, i.e. without waiting on the EAD-per-EVI route
+ */
+ bool ead_evi_rx;
+#define BGP_EVPN_MH_EAD_EVI_RX_DEF true
+ /* Skip EAD-EVI advertisements by turning off this knob */
+ bool ead_evi_tx;
+#define BGP_EVPN_MH_EAD_EVI_TX_DEF true
};
/****************************************************************************/
diff --git a/bgpd/bgp_evpn_private.h b/bgpd/bgp_evpn_private.h
index 6a7d124938..77b3746344 100644
--- a/bgpd/bgp_evpn_private.h
+++ b/bgpd/bgp_evpn_private.h
@@ -630,13 +630,5 @@ extern struct bgp_dest *
bgp_global_evpn_node_lookup(struct bgp_table *table, afi_t afi, safi_t safi,
const struct prefix_evpn *evp,
struct prefix_rd *prd);
-extern struct bgp_node *bgp_global_evpn_node_get(struct bgp_table *table,
- afi_t afi, safi_t safi,
- const struct prefix_evpn *evp,
- struct prefix_rd *prd);
-extern struct bgp_node *
-bgp_global_evpn_node_lookup(struct bgp_table *table, afi_t afi, safi_t safi,
- const struct prefix_evpn *evp,
- struct prefix_rd *prd);
extern void bgp_evpn_import_route_in_vrfs(struct bgp_path_info *pi, int import);
#endif /* _BGP_EVPN_PRIVATE_H */
diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c
index 0f7320562c..5b0b3bb6e5 100644
--- a/bgpd/bgp_evpn_vty.c
+++ b/bgpd/bgp_evpn_vty.c
@@ -3761,6 +3761,26 @@ DEFPY (bgp_evpn_use_es_l3nhg,
return CMD_SUCCESS;
}
+DEFPY (bgp_evpn_ead_evi_rx_disable,
+ bgp_evpn_ead_evi_rx_disable_cmd,
+ "[no$no] disable-ead-evi-rx",
+ NO_STR
+ "Activate PE on EAD-ES even if EAD-EVI is not received\n")
+{
+ bgp_mh_info->ead_evi_rx = no? true :false;
+ return CMD_SUCCESS;
+}
+
+DEFPY (bgp_evpn_ead_evi_tx_disable,
+ bgp_evpn_ead_evi_tx_disable_cmd,
+ "[no$no] disable-ead-evi-tx",
+ NO_STR
+ "Don't advertise EAD-EVI for local ESs\n")
+{
+ bgp_mh_info->ead_evi_tx = no? true :false;
+ return CMD_SUCCESS;
+}
+
DEFPY (bgp_evpn_advertise_pip_ip_mac,
bgp_evpn_advertise_pip_ip_mac_cmd,
"[no$no] advertise-pip [ip <A.B.C.D> [mac <X:X:X:X:X:X|X:X:X:X:X:X/M>]]",
@@ -4080,7 +4100,7 @@ DEFPY(show_bgp_l2vpn_evpn_es_vrf, show_bgp_l2vpn_evpn_es_vrf_cmd,
*/
DEFUN(show_bgp_l2vpn_evpn_summary,
show_bgp_l2vpn_evpn_summary_cmd,
- "show bgp [vrf VRFNAME] l2vpn evpn summary [established|failed] [json]",
+ "show bgp [vrf VRFNAME] l2vpn evpn summary [established|failed] [wide] [json]",
SHOW_STR
BGP_STR
"bgp vrf\n"
@@ -4090,23 +4110,30 @@ DEFUN(show_bgp_l2vpn_evpn_summary,
"Summary of BGP neighbor status\n"
"Show only sessions in Established state\n"
"Show only sessions not in Established state\n"
+ "Increase table width for longer output\n"
JSON_STR)
{
int idx_vrf = 0;
- bool uj = use_json(argc, argv);
+ int idx = 0;
char *vrf = NULL;
- bool show_failed = false;
- bool show_established = false;
+ uint8_t show_flags = 0;
if (argv_find(argv, argc, "vrf", &idx_vrf))
vrf = argv[++idx_vrf]->arg;
- if (argv_find(argv, argc, "failed", &idx_vrf))
- show_failed = true;
- if (argv_find(argv, argc, "established", &idx_vrf))
- show_established = true;
- return bgp_show_summary_vty(vty, vrf, AFI_L2VPN, SAFI_EVPN, show_failed,
- show_established, uj);
+ if (argv_find(argv, argc, "failed", &idx))
+ SET_FLAG(show_flags, BGP_SHOW_OPT_FAILED);
+
+ if (argv_find(argv, argc, "established", &idx))
+ SET_FLAG(show_flags, BGP_SHOW_OPT_ESTABLISHED);
+
+ if (argv_find(argv, argc, "wide", &idx))
+ SET_FLAG(show_flags, BGP_SHOW_OPT_WIDE);
+
+ if (use_json(argc, argv))
+ SET_FLAG(show_flags, BGP_SHOW_OPT_JSON);
+
+ return bgp_show_summary_vty(vty, vrf, AFI_L2VPN, SAFI_EVPN, show_flags);
}
int bgp_evpn_cli_parse_type(int *type, struct cmd_token **argv, int argc)
@@ -5755,6 +5782,20 @@ void bgp_config_write_evpn_info(struct vty *vty, struct bgp *bgp, afi_t afi,
vty_out(vty, " no use-es-l3nhg\n");
}
+ if (bgp_mh_info->ead_evi_rx != BGP_EVPN_MH_EAD_EVI_RX_DEF) {
+ if (bgp_mh_info->ead_evi_rx)
+ vty_out(vty, " no disable-ead-evi-rx\n");
+ else
+ vty_out(vty, " disable-ead-evi-rx\n");
+ }
+
+ if (bgp_mh_info->ead_evi_tx != BGP_EVPN_MH_EAD_EVI_TX_DEF) {
+ if (bgp_mh_info->ead_evi_tx)
+ vty_out(vty, " no disable-ead-evi-tx\n");
+ else
+ vty_out(vty, " disable-ead-evi-tx\n");
+ }
+
if (!bgp->evpn_info->dup_addr_detect)
vty_out(vty, " no dup-addr-detection\n");
@@ -5900,6 +5941,8 @@ void bgp_ethernetvpn_init(void)
install_element(BGP_EVPN_NODE, &bgp_evpn_flood_control_cmd);
install_element(BGP_EVPN_NODE, &bgp_evpn_advertise_pip_ip_mac_cmd);
install_element(BGP_EVPN_NODE, &bgp_evpn_use_es_l3nhg_cmd);
+ install_element(BGP_EVPN_NODE, &bgp_evpn_ead_evi_rx_disable_cmd);
+ install_element(BGP_EVPN_NODE, &bgp_evpn_ead_evi_tx_disable_cmd);
/* test commands */
install_element(BGP_EVPN_NODE, &test_es_add_cmd);
diff --git a/bgpd/bgp_flowspec_util.c b/bgpd/bgp_flowspec_util.c
index 15b891f25a..b244c87258 100644
--- a/bgpd/bgp_flowspec_util.c
+++ b/bgpd/bgp_flowspec_util.c
@@ -637,7 +637,7 @@ int bgp_flowspec_match_rules_fill(uint8_t *nlri_content, int len,
offset += ret;
break;
default:
- flog_err(EC_LIB_DEVELOPMENT, "%s: unknown type %d\n",
+ flog_err(EC_LIB_DEVELOPMENT, "%s: unknown type %d",
__func__, type);
}
}
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index cec4a9339a..b69e2d71b6 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -110,9 +110,9 @@ static int bgp_peer_reg_with_nht(struct peer *peer)
&& !CHECK_FLAG(peer->bgp->flags, BGP_FLAG_DISABLE_NH_CONNECTED_CHK))
connected = 1;
- return bgp_find_or_add_nexthop(
- peer->bgp, peer->bgp, family2afi(peer->su.sa.sa_family),
- NULL, peer, connected);
+ return bgp_find_or_add_nexthop(peer->bgp, peer->bgp,
+ family2afi(peer->su.sa.sa_family),
+ SAFI_UNICAST, NULL, peer, connected);
}
static void peer_xfer_stats(struct peer *peer_dst, struct peer *peer_src)
@@ -1420,19 +1420,6 @@ int bgp_stop(struct peer *peer)
peer->update_time = 0;
-/* Until we are sure that there is no problem about prefix count
- this should be commented out.*/
-#if 0
- /* Reset prefix count */
- peer->pcount[AFI_IP][SAFI_UNICAST] = 0;
- peer->pcount[AFI_IP][SAFI_MULTICAST] = 0;
- peer->pcount[AFI_IP][SAFI_LABELED_UNICAST] = 0;
- peer->pcount[AFI_IP][SAFI_MPLS_VPN] = 0;
- peer->pcount[AFI_IP6][SAFI_UNICAST] = 0;
- peer->pcount[AFI_IP6][SAFI_MULTICAST] = 0;
- peer->pcount[AFI_IP6][SAFI_LABELED_UNICAST] = 0;
-#endif /* 0 */
-
if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE)
&& !(CHECK_FLAG(peer->flags, PEER_FLAG_DELETE))) {
peer_delete(peer);
diff --git a/bgpd/bgp_main.c b/bgpd/bgp_main.c
index 287555b1fc..3cb3d06217 100644
--- a/bgpd/bgp_main.c
+++ b/bgpd/bgp_main.c
@@ -60,6 +60,7 @@
#include "bgpd/bgp_keepalives.h"
#include "bgpd/bgp_network.h"
#include "bgpd/bgp_errors.h"
+#include "bgpd/bgp_script.h"
#include "lib/routing_nb.h"
#include "bgpd/bgp_nb.h"
#include "bgpd/bgp_evpn_mh.h"
@@ -510,6 +511,10 @@ int main(int argc, char **argv)
/* Initializations. */
bgp_vrf_init();
+#ifdef HAVE_SCRIPTING
+ bgp_script_init();
+#endif
+
hook_register(routing_conf_event,
routing_control_plane_protocols_name_validate);
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index 3bc4c03233..d9acda8bd0 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -419,8 +419,7 @@ int vpn_leak_label_callback(
static bool ecom_intersect(struct ecommunity *e1, struct ecommunity *e2)
{
- int i;
- int j;
+ uint32_t i, j;
if (!e1 || !e2)
return false;
@@ -591,8 +590,8 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
* TBD do we need to do anything about the
* 'connected' parameter?
*/
- nh_valid = bgp_find_or_add_nexthop(bgp, bgp_nexthop,
- afi, bpi, NULL, 0);
+ nh_valid = bgp_find_or_add_nexthop(
+ bgp, bgp_nexthop, afi, safi, bpi, NULL, 0);
if (debug)
zlog_debug("%s: nexthop is %svalid (in vrf %s)",
@@ -657,8 +656,8 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
* TBD do we need to do anything about the
* 'connected' parameter?
*/
- nh_valid = bgp_find_or_add_nexthop(bgp, bgp_nexthop,
- afi, new, NULL, 0);
+ nh_valid = bgp_find_or_add_nexthop(bgp, bgp_nexthop, afi, safi,
+ new, NULL, 0);
if (debug)
zlog_debug("%s: nexthop is %svalid (in vrf %s)",
diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h
index df2544d608..91a073d5d7 100644
--- a/bgpd/bgp_mplsvpn.h
+++ b/bgpd/bgp_mplsvpn.h
@@ -83,6 +83,21 @@ extern void vrf_import_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
void vrf_unimport_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
afi_t afi, safi_t safi);
+static inline bool is_bgp_vrf_mplsvpn(struct bgp *bgp)
+{
+ afi_t afi;
+
+ if (bgp->inst_type == BGP_INSTANCE_TYPE_VRF)
+ for (afi = 0; afi < AFI_MAX; ++afi) {
+ if (CHECK_FLAG(bgp->af_flags[afi][SAFI_UNICAST],
+ BGP_CONFIG_VRF_TO_MPLSVPN_EXPORT)
+ || CHECK_FLAG(bgp->af_flags[afi][SAFI_UNICAST],
+ BGP_CONFIG_MPLSVPN_TO_VRF_IMPORT))
+ return true;
+ }
+ return false;
+}
+
static inline int vpn_leak_to_vpn_active(struct bgp *bgp_vrf, afi_t afi,
const char **pmsg)
{
diff --git a/bgpd/bgp_mplsvpn_snmp.c b/bgpd/bgp_mplsvpn_snmp.c
new file mode 100644
index 0000000000..055bae8432
--- /dev/null
+++ b/bgpd/bgp_mplsvpn_snmp.c
@@ -0,0 +1,1689 @@
+/* MPLS/BGP L3VPN MIB
+ * Copyright (C) 2020 Volta Networks Inc
+ *
+ * This file is part of FRR.
+ *
+ * FRRouting is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * FRRouting is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#include <net-snmp/net-snmp-config.h>
+#include <net-snmp/net-snmp-includes.h>
+
+#include "if.h"
+#include "log.h"
+#include "prefix.h"
+#include "command.h"
+#include "thread.h"
+#include "smux.h"
+#include "filter.h"
+#include "hook.h"
+#include "libfrr.h"
+#include "version.h"
+
+#include "bgpd/bgpd.h"
+#include "bgpd/bgp_route.h"
+#include "bgpd/bgp_attr.h"
+#include "bgpd/bgp_ecommunity.h"
+#include "bgpd/bgp_mplsvpn.h"
+#include "bgpd/bgp_mplsvpn_snmp.h"
+
+#define BGP_mplsvpn_notif_enable_true 1
+#define BGP_mplsvpn_notif_enable_false 2
+
+/* MPLSL3VPN MIB described in RFC4382 */
+#define MPLSL3VPNMIB 1, 3, 6, 1, 2, 1, 10, 166, 11
+
+/* MPLSL3VPN Scalars */
+#define MPLSL3VPNCONFIGUREDVRFS 1
+#define MPLSL3VPNACTIVEVRFS 2
+#define MPLSL3VPNCONNECTEDINTERFACES 3
+#define MPLSL3VPNNOTIFICATIONENABLE 4
+#define MPLSL3VPNCONFMAXPOSSRTS 5
+#define MPLSL3VPNVRFCONFRTEMXTHRSHTIME 6
+#define MPLSL3VPNILLLBLRCVTHRSH 7
+
+/* MPLSL3VPN IFConf Table */
+#define MPLSL3VPNIFVPNCLASSIFICATION 1
+#define MPLSL3VPNIFCONFSTORAGETYPE 2
+#define MPLSL3VPNIFCONFROWSTATUS 3
+
+/* MPLSL3VPN VRF Table */
+#define MPLSL3VPNVRFVPNID 1
+#define MPLSL3VPNVRFDESC 2
+#define MPLSL3VPNVRFRD 3
+#define MPLSL3VPNVRFCREATIONTIME 4
+#define MPLSL3VPNVRFOPERSTATUS 5
+#define MPLSL3VPNVRFACTIVEINTERFACES 6
+#define MPLSL3VPNVRFASSOCIATEDINTERFACES 7
+#define MPLSL3VPNVRFCONFMIDRTETHRESH 8
+#define MPLSL3VPNVRFCONFHIGHRTETHRSH 9
+#define MPLSL3VPNVRFCONFMAXROUTES 10
+#define MPLSL3VPNVRFCONFLASTCHANGED 11
+#define MPLSL3VPNVRFCONFROWSTATUS 12
+#define MPLSL3VPNVRFCONFADMINSTATUS 13
+#define MPLSL3VPNVRFCONFSTORAGETYPE 14
+
+/* MPLSL3VPN RT Table */
+#define MPLSL3VPNVRFRT 1
+#define MPLSL3VPNVRFRTDESCR 2
+#define MPLSL3VPNVRFRTROWSTATUS 3
+#define MPLSL3VPNVRFRTSTORAGETYPE 4
+
+/* MPLSL3VPN PERF Table */
+#define MPLSL3VPNVRFPERFROUTESADDED 1
+#define MPLSL3VPNVRFPERFROUTESDELETED 2
+#define MPLSL3VPNVRFPERFCURRNUMROUTES 3
+
+/* MPLSL3VPN RTE Table */
+#define MPLSL3VPNVRFRTEINETCIDRDESTTYPE 1
+#define MPLSL3VPNVRFRTEINETCIDRDEST 2
+#define MPLSL3VPNVRFRTEINETCIDRPFXLEN 3
+#define MPLSL3VPNVRFRTEINETCIDRPOLICY 4
+#define MPLSL3VPNVRFRTEINETCIDRNHOPTYPE 5
+#define MPLSL3VPNVRFRTEINETCIDRNEXTHOP 6
+#define MPLSL3VPNVRFRTEINETCIDRIFINDEX 7
+#define MPLSL3VPNVRFRTEINETCIDRTYPE 8
+#define MPLSL3VPNVRFRTEINETCIDRPROTO 9
+#define MPLSL3VPNVRFRTEINETCIDRAGE 10
+#define MPLSL3VPNVRFRTEINETCIDRNEXTHOPAS 11
+#define MPLSL3VPNVRFRTEINETCIDRMETRIC1 12
+#define MPLSL3VPNVRFRTEINETCIDRMETRIC2 13
+#define MPLSL3VPNVRFRTEINETCIDRMETRIC3 14
+#define MPLSL3VPNVRFRTEINETCIDRMETRIC4 15
+#define MPLSL3VPNVRFRTEINETCIDRMETRIC5 16
+#define MPLSL3VPNVRFRTEINETCIDRXCPOINTER 17
+#define MPLSL3VPNVRFRTEINETCIDRSTATUS 18
+
+/* BGP Trap */
+#define MPLSL3VPNVRFUP 1
+#define MPLSL3VPNDOWN 2
+
+/* SNMP value hack. */
+#define INTEGER ASN_INTEGER
+#define INTEGER32 ASN_INTEGER
+#define COUNTER32 ASN_COUNTER
+#define OCTET_STRING ASN_OCTET_STR
+#define IPADDRESS ASN_IPADDRESS
+#define GAUGE32 ASN_UNSIGNED
+#define TIMETICKS ASN_TIMETICKS
+#define OID ASN_OBJECT_ID
+
+/* Declare static local variables for convenience. */
+SNMP_LOCAL_VARIABLES
+
+#define RT_PREAMBLE_SIZE 20
+
+/* BGP-MPLS-MIB instances */
+static oid mpls_l3vpn_oid[] = {MPLSL3VPNMIB};
+static oid mpls_l3vpn_trap_oid[] = {MPLSL3VPNMIB, 0};
+static char rd_buf[RD_ADDRSTRLEN];
+/* Notifications enabled by default */
+static uint8_t bgp_mplsvpn_notif_enable = SNMP_TRUE;
+static oid mpls_l3vpn_policy_oid[2] = {0, 0};
+static const char *empty_nhop = "";
+char rt_description[VRF_NAMSIZ + RT_PREAMBLE_SIZE];
+
+static uint8_t *mplsL3vpnConfiguredVrfs(struct variable *, oid[], size_t *, int,
+ size_t *, WriteMethod **);
+
+static uint8_t *mplsL3vpnActiveVrfs(struct variable *, oid[], size_t *, int,
+ size_t *, WriteMethod **);
+
+static uint8_t *mplsL3vpnConnectedInterfaces(struct variable *, oid[], size_t *,
+ int, size_t *, WriteMethod **);
+
+static uint8_t *mplsL3vpnNotificationEnable(struct variable *, oid[], size_t *,
+ int, size_t *, WriteMethod **);
+
+static uint8_t *mplsL3vpnVrfConfMaxPossRts(struct variable *, oid[], size_t *,
+ int, size_t *, WriteMethod **);
+
+static uint8_t *mplsL3vpnVrfConfRteMxThrshTime(struct variable *, oid[],
+ size_t *, int, size_t *,
+ WriteMethod **);
+
+static uint8_t *mplsL3vpnIllLblRcvThrsh(struct variable *, oid[], size_t *, int,
+ size_t *, WriteMethod **);
+
+static uint8_t *mplsL3vpnVrfTable(struct variable *, oid[], size_t *, int,
+ size_t *, WriteMethod **);
+
+static uint8_t *mplsL3vpnVrfRtTable(struct variable *, oid[], size_t *, int,
+ size_t *, WriteMethod **);
+
+static uint8_t *mplsL3vpnIfConfTable(struct variable *, oid[], size_t *, int,
+ size_t *, WriteMethod **);
+
+static uint8_t *mplsL3vpnPerfTable(struct variable *, oid[], size_t *, int,
+ size_t *, WriteMethod **);
+
+static uint8_t *mplsL3vpnRteTable(struct variable *, oid[], size_t *, int,
+ size_t *, WriteMethod **);
+
+
+static struct variable mpls_l3vpn_variables[] = {
+ /* BGP version. */
+ {MPLSL3VPNCONFIGUREDVRFS,
+ GAUGE32,
+ RONLY,
+ mplsL3vpnConfiguredVrfs,
+ 3,
+ {1, 1, 1} },
+ {MPLSL3VPNACTIVEVRFS,
+ GAUGE32,
+ RONLY,
+ mplsL3vpnActiveVrfs,
+ 3,
+ {1, 1, 2} },
+ {MPLSL3VPNCONNECTEDINTERFACES,
+ GAUGE32,
+ RONLY,
+ mplsL3vpnConnectedInterfaces,
+ 3,
+ {1, 1, 3} },
+ {MPLSL3VPNNOTIFICATIONENABLE,
+ INTEGER,
+ RWRITE,
+ mplsL3vpnNotificationEnable,
+ 3,
+ {1, 1, 4} },
+ {MPLSL3VPNCONFMAXPOSSRTS,
+ GAUGE32,
+ RONLY,
+ mplsL3vpnVrfConfMaxPossRts,
+ 3,
+ {1, 1, 5} },
+ {MPLSL3VPNVRFCONFRTEMXTHRSHTIME,
+ GAUGE32,
+ RONLY,
+ mplsL3vpnVrfConfRteMxThrshTime,
+ 3,
+ {1, 1, 6} },
+ {MPLSL3VPNILLLBLRCVTHRSH,
+ GAUGE32,
+ RONLY,
+ mplsL3vpnIllLblRcvThrsh,
+ 3,
+ {1, 1, 7} },
+
+ /* Ifconf Table */
+ {MPLSL3VPNIFVPNCLASSIFICATION,
+ INTEGER,
+ RONLY,
+ mplsL3vpnIfConfTable,
+ 5,
+ {1, 2, 1, 1, 2} },
+ {MPLSL3VPNIFCONFSTORAGETYPE,
+ INTEGER,
+ RONLY,
+ mplsL3vpnIfConfTable,
+ 5,
+ {1, 2, 1, 1, 4} },
+ {MPLSL3VPNIFCONFROWSTATUS,
+ INTEGER,
+ RONLY,
+ mplsL3vpnIfConfTable,
+ 5,
+ {1, 2, 1, 1, 5} },
+
+ /* mplsL3VpnVrf Table */
+ {MPLSL3VPNVRFVPNID,
+ OCTET_STRING,
+ RONLY,
+ mplsL3vpnVrfTable,
+ 5,
+ {1, 2, 2, 1, 2} },
+ {MPLSL3VPNVRFDESC,
+ OCTET_STRING,
+ RONLY,
+ mplsL3vpnVrfTable,
+ 5,
+ {1, 2, 2, 1, 3} },
+ {MPLSL3VPNVRFRD,
+ OCTET_STRING,
+ RONLY,
+ mplsL3vpnVrfTable,
+ 5,
+ {1, 2, 2, 1, 4} },
+ {MPLSL3VPNVRFCREATIONTIME,
+ TIMETICKS,
+ RONLY,
+ mplsL3vpnVrfTable,
+ 5,
+ {1, 2, 2, 1, 5} },
+ {MPLSL3VPNVRFOPERSTATUS,
+ INTEGER,
+ RONLY,
+ mplsL3vpnVrfTable,
+ 5,
+ {1, 2, 2, 1, 6} },
+ {MPLSL3VPNVRFACTIVEINTERFACES,
+ GAUGE32,
+ RONLY,
+ mplsL3vpnVrfTable,
+ 5,
+ {1, 2, 2, 1, 7} },
+ {MPLSL3VPNVRFASSOCIATEDINTERFACES,
+ GAUGE32,
+ RONLY,
+ mplsL3vpnVrfTable,
+ 5,
+ {1, 2, 2, 1, 8} },
+ {MPLSL3VPNVRFCONFMIDRTETHRESH,
+ GAUGE32,
+ RONLY,
+ mplsL3vpnVrfTable,
+ 5,
+ {1, 2, 2, 1, 9} },
+ {MPLSL3VPNVRFCONFHIGHRTETHRSH,
+ GAUGE32,
+ RONLY,
+ mplsL3vpnVrfTable,
+ 5,
+ {1, 2, 2, 1, 10} },
+ {MPLSL3VPNVRFCONFMAXROUTES,
+ GAUGE32,
+ RONLY,
+ mplsL3vpnVrfTable,
+ 5,
+ {1, 2, 2, 1, 11} },
+ {MPLSL3VPNVRFCONFLASTCHANGED,
+ TIMETICKS,
+ RONLY,
+ mplsL3vpnVrfTable,
+ 5,
+ {1, 2, 2, 1, 12} },
+ {MPLSL3VPNVRFCONFROWSTATUS,
+ INTEGER,
+ RONLY,
+ mplsL3vpnVrfTable,
+ 5,
+ {1, 2, 2, 1, 13} },
+ {MPLSL3VPNVRFCONFADMINSTATUS,
+ INTEGER,
+ RONLY,
+ mplsL3vpnVrfTable,
+ 5,
+ {1, 2, 2, 1, 14} },
+ {MPLSL3VPNVRFCONFSTORAGETYPE,
+ INTEGER,
+ RONLY,
+ mplsL3vpnVrfTable,
+ 5,
+ {1, 2, 2, 1, 15} },
+
+ /* mplsL3vpnVrfRt Table */
+ {MPLSL3VPNVRFRT,
+ OCTET_STRING,
+ RONLY,
+ mplsL3vpnVrfRtTable,
+ 5,
+ {1, 2, 3, 1, 4} },
+ {MPLSL3VPNVRFRTDESCR,
+ OCTET_STRING,
+ RONLY,
+ mplsL3vpnVrfRtTable,
+ 5,
+ {1, 2, 3, 1, 5} },
+ {MPLSL3VPNVRFRTROWSTATUS,
+ INTEGER,
+ RONLY,
+ mplsL3vpnVrfRtTable,
+ 5,
+ {1, 2, 3, 1, 6} },
+ {MPLSL3VPNVRFRTSTORAGETYPE,
+ INTEGER,
+ RONLY,
+ mplsL3vpnVrfRtTable,
+ 5,
+ {1, 2, 3, 1, 7} },
+
+ /* mplsL3VpnPerfTable */
+ {MPLSL3VPNVRFPERFROUTESADDED,
+ COUNTER32,
+ RONLY,
+ mplsL3vpnPerfTable,
+ 5,
+ {1, 3, 1, 1, 1} },
+ {MPLSL3VPNVRFPERFROUTESDELETED,
+ COUNTER32,
+ RONLY,
+ mplsL3vpnPerfTable,
+ 5,
+ {1, 3, 1, 1, 2} },
+ {MPLSL3VPNVRFPERFCURRNUMROUTES,
+ GAUGE32,
+ RONLY,
+ mplsL3vpnPerfTable,
+ 5,
+ {1, 3, 1, 1, 3} },
+
+ /* mplsVpnRteTable */
+ {MPLSL3VPNVRFRTEINETCIDRDESTTYPE,
+ INTEGER,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 1} },
+ {MPLSL3VPNVRFRTEINETCIDRDEST,
+ OCTET_STRING,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 2} },
+ {MPLSL3VPNVRFRTEINETCIDRPFXLEN,
+ GAUGE32,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 3} },
+ {MPLSL3VPNVRFRTEINETCIDRPOLICY,
+ OID,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 4} },
+ {MPLSL3VPNVRFRTEINETCIDRNHOPTYPE,
+ INTEGER,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 5} },
+ {MPLSL3VPNVRFRTEINETCIDRNEXTHOP,
+ OCTET_STRING,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 6} },
+ {MPLSL3VPNVRFRTEINETCIDRIFINDEX,
+ INTEGER,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 7} },
+ {MPLSL3VPNVRFRTEINETCIDRTYPE,
+ INTEGER,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 8} },
+ {MPLSL3VPNVRFRTEINETCIDRPROTO,
+ INTEGER,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 9} },
+ {MPLSL3VPNVRFRTEINETCIDRAGE,
+ GAUGE32,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 10} },
+ {MPLSL3VPNVRFRTEINETCIDRNEXTHOPAS,
+ GAUGE32,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 11} },
+ {MPLSL3VPNVRFRTEINETCIDRMETRIC1,
+ INTEGER,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 12} },
+ {MPLSL3VPNVRFRTEINETCIDRMETRIC2,
+ INTEGER,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 13} },
+ {MPLSL3VPNVRFRTEINETCIDRMETRIC3,
+ INTEGER,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 14} },
+ {MPLSL3VPNVRFRTEINETCIDRMETRIC4,
+ INTEGER,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 15} },
+ {MPLSL3VPNVRFRTEINETCIDRMETRIC5,
+ INTEGER,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 16} },
+ {MPLSL3VPNVRFRTEINETCIDRXCPOINTER,
+ OCTET_STRING,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 17} },
+ {MPLSL3VPNVRFRTEINETCIDRSTATUS,
+ INTEGER,
+ RONLY,
+ mplsL3vpnRteTable,
+ 5,
+ {1, 4, 1, 1, 18} },
+};
+
+/* timeticks are in hundredths of a second */
+static void bgp_mpls_l3vpn_update_timeticks(time_t *counter)
+{
+ struct timeval tv;
+
+ monotime(&tv);
+ *counter = (tv.tv_sec * 100) + (tv.tv_usec / 10000);
+}
+
+static int bgp_mpls_l3vpn_update_last_changed(struct bgp *bgp)
+{
+ if (bgp->snmp_stats)
+ bgp_mpls_l3vpn_update_timeticks(
+ &(bgp->snmp_stats->modify_time));
+ return 0;
+}
+
+static uint32_t bgp_mpls_l3vpn_current_routes(struct bgp *l3vpn_bgp)
+{
+ uint32_t count = 0;
+ struct bgp_table *table;
+ struct bgp_dest *dest;
+ struct bgp_path_info *pi;
+
+ table = l3vpn_bgp->rib[AFI_IP][SAFI_UNICAST];
+ for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) {
+ pi = bgp_dest_get_bgp_path_info(dest);
+ for (; pi; pi = pi->next)
+ count++;
+ }
+ table = l3vpn_bgp->rib[AFI_IP6][SAFI_UNICAST];
+ for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) {
+ pi = bgp_dest_get_bgp_path_info(dest);
+ for (; pi; pi = pi->next)
+ count++;
+ }
+ return count;
+}
+
+static int bgp_init_snmp_stats(struct bgp *bgp)
+{
+ if (is_bgp_vrf_mplsvpn(bgp)) {
+ if (bgp->snmp_stats == NULL) {
+ bgp->snmp_stats = XCALLOC(
+ MTYPE_BGP, sizeof(struct bgp_snmp_stats));
+ /* fix up added routes */
+ if (bgp->snmp_stats) {
+ bgp->snmp_stats->routes_added =
+ bgp_mpls_l3vpn_current_routes(bgp);
+ bgp_mpls_l3vpn_update_timeticks(
+ &(bgp->snmp_stats->creation_time));
+ }
+ }
+ } else {
+ if (bgp->snmp_stats) {
+ XFREE(MTYPE_BGP, bgp->snmp_stats);
+ bgp->snmp_stats = NULL;
+ }
+ }
+ /* Something changed - update the timestamp */
+ bgp_mpls_l3vpn_update_last_changed(bgp);
+ return 0;
+}
+
+static int bgp_snmp_update_route_stats(struct bgp_dest *dest,
+ struct bgp_path_info *pi, bool added)
+{
+ struct bgp_table *table;
+
+ if (dest) {
+ table = bgp_dest_table(dest);
+ /* only update if we have a stats block - MPLSVPN vrfs for now*/
+ if (table && table->bgp && table->bgp->snmp_stats) {
+ if (added)
+ table->bgp->snmp_stats->routes_added++;
+ else
+ table->bgp->snmp_stats->routes_deleted++;
+ }
+ }
+ return 0;
+}
+
+static bool is_bgp_vrf_active(struct bgp *bgp)
+{
+ struct vrf *vrf;
+ struct interface *ifp;
+
+ /* if there is one interface in the vrf which is up then it is deemed
+ * active
+ */
+ vrf = vrf_lookup_by_id(bgp->vrf_id);
+ if (vrf == NULL)
+ return false;
+ RB_FOREACH (ifp, if_name_head, &vrf->ifaces_by_name) {
+ /* if we are in a vrf skip the l3mdev */
+ if (bgp->name && strncmp(ifp->name, bgp->name, VRF_NAMSIZ) == 0)
+ continue;
+
+ if (if_is_up(ifp))
+ return true;
+ }
+ return false;
+}
+
+/* BGP Traps. */
+static struct trap_object l3vpn_trap_list[] = {{5, {1, 2, 1, 1, 5} },
+ {5, {1, 2, 2, 1, 6} } };
+
+static int bgp_vrf_check_update_active(struct bgp *bgp, struct interface *ifp)
+{
+ bool new_active = false;
+ oid trap;
+ struct index_oid trap_index[2];
+
+ if (!is_bgp_vrf_mplsvpn(bgp) || bgp->snmp_stats == NULL
+ || !bgp_mplsvpn_notif_enable)
+ return 0;
+ new_active = is_bgp_vrf_active(bgp);
+ if (bgp->snmp_stats->active != new_active) {
+ /* add trap in here */
+ bgp->snmp_stats->active = new_active;
+
+		/* send relevant trap */
+ if (bgp->snmp_stats->active)
+ trap = MPLSL3VPNVRFUP;
+ else
+ trap = MPLSL3VPNDOWN;
+
+ /*
+ * first index vrf_name + ifindex
+ * second index vrf_name
+ */
+ trap_index[1].indexlen = strnlen(bgp->name, VRF_NAMSIZ);
+ oid_copy_str(trap_index[0].indexname, bgp->name,
+ trap_index[1].indexlen);
+ oid_copy_str(trap_index[1].indexname, bgp->name,
+ trap_index[1].indexlen);
+ trap_index[0].indexlen =
+ trap_index[1].indexlen + sizeof(ifindex_t);
+ oid_copy_int(trap_index[0].indexname + trap_index[1].indexlen,
+ (int *)&(ifp->ifindex));
+
+ smux_trap_multi_index(
+ mpls_l3vpn_variables, array_size(mpls_l3vpn_variables),
+ mpls_l3vpn_trap_oid, array_size(mpls_l3vpn_trap_oid),
+ mpls_l3vpn_oid, sizeof(mpls_l3vpn_oid) / sizeof(oid),
+ trap_index, array_size(trap_index), l3vpn_trap_list,
+ array_size(l3vpn_trap_list), trap);
+ }
+ bgp_mpls_l3vpn_update_last_changed(bgp);
+ return 0;
+}
+
+static uint8_t *mplsL3vpnConfiguredVrfs(struct variable *v, oid name[],
+ size_t *length, int exact,
+ size_t *var_len,
+ WriteMethod **write_method)
+{
+ struct listnode *node, *nnode;
+ struct bgp *bgp;
+ uint32_t count = 0;
+
+ if (smux_header_generic(v, name, length, exact, var_len, write_method)
+ == MATCH_FAILED)
+ return NULL;
+
+ for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) {
+ if (is_bgp_vrf_mplsvpn(bgp))
+ count++;
+ }
+ return SNMP_INTEGER(count);
+}
+
+static uint8_t *mplsL3vpnActiveVrfs(struct variable *v, oid name[],
+ size_t *length, int exact, size_t *var_len,
+ WriteMethod **write_method)
+{
+ struct listnode *node, *nnode;
+ struct bgp *bgp;
+ uint32_t count = 0;
+
+ if (smux_header_generic(v, name, length, exact, var_len, write_method)
+ == MATCH_FAILED)
+ return NULL;
+
+ for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) {
+ if (is_bgp_vrf_mplsvpn(bgp) && is_bgp_vrf_active(bgp))
+ count++;
+ }
+ return SNMP_INTEGER(count);
+}
+
+static uint8_t *mplsL3vpnConnectedInterfaces(struct variable *v, oid name[],
+ size_t *length, int exact,
+ size_t *var_len,
+ WriteMethod **write_method)
+{
+ struct listnode *node, *nnode;
+ struct bgp *bgp;
+ uint32_t count = 0;
+ struct vrf *vrf;
+
+ if (smux_header_generic(v, name, length, exact, var_len, write_method)
+ == MATCH_FAILED)
+ return NULL;
+
+ for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) {
+ if (is_bgp_vrf_mplsvpn(bgp)) {
+ vrf = vrf_lookup_by_name(bgp->name);
+ if (vrf == NULL)
+ continue;
+
+ count += vrf_interface_count(vrf);
+ }
+ }
+
+ return SNMP_INTEGER(count);
+}
+
+static int write_mplsL3vpnNotificationEnable(int action, uint8_t *var_val,
+ uint8_t var_val_type,
+ size_t var_val_len, uint8_t *statP,
+ oid *name, size_t length)
+{
+ uint32_t intval;
+
+ if (var_val_type != ASN_INTEGER)
+ return SNMP_ERR_WRONGTYPE;
+
+ if (var_val_len != sizeof(long))
+ return SNMP_ERR_WRONGLENGTH;
+
+ intval = *(long *)var_val;
+ bgp_mplsvpn_notif_enable = intval;
+ return SNMP_ERR_NOERROR;
+}
+
+static uint8_t *mplsL3vpnNotificationEnable(struct variable *v, oid name[],
+ size_t *length, int exact,
+ size_t *var_len,
+ WriteMethod **write_method)
+{
+ if (smux_header_generic(v, name, length, exact, var_len, write_method)
+ == MATCH_FAILED)
+ return NULL;
+
+ *write_method = write_mplsL3vpnNotificationEnable;
+ return SNMP_INTEGER(bgp_mplsvpn_notif_enable);
+}
+
+static uint8_t *mplsL3vpnVrfConfMaxPossRts(struct variable *v, oid name[],
+ size_t *length, int exact,
+ size_t *var_len,
+ WriteMethod **write_method)
+{
+ if (smux_header_generic(v, name, length, exact, var_len, write_method)
+ == MATCH_FAILED)
+ return NULL;
+
+ return SNMP_INTEGER(0);
+}
+
+static uint8_t *mplsL3vpnVrfConfRteMxThrshTime(struct variable *v, oid name[],
+ size_t *length, int exact,
+ size_t *var_len,
+ WriteMethod **write_method)
+{
+ if (smux_header_generic(v, name, length, exact, var_len, write_method)
+ == MATCH_FAILED)
+ return NULL;
+
+ return SNMP_INTEGER(0);
+}
+
+static uint8_t *mplsL3vpnIllLblRcvThrsh(struct variable *v, oid name[],
+ size_t *length, int exact,
+ size_t *var_len,
+ WriteMethod **write_method)
+{
+ if (smux_header_generic(v, name, length, exact, var_len, write_method)
+ == MATCH_FAILED)
+ return NULL;
+
+ return SNMP_INTEGER(0);
+}
+
+
+static struct bgp *bgp_lookup_by_name_next(char *vrf_name)
+{
+ struct bgp *bgp, *bgp_next = NULL;
+ struct listnode *node, *nnode;
+ bool first = false;
+
+ /*
+ * the vrfs are not stored alphabetically but since we are using the
+ * vrf name as an index we need the getnext function to return them
+	 * in a strict order. Thus run through and find the best next one.
+ */
+ for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) {
+ if (!is_bgp_vrf_mplsvpn(bgp))
+ continue;
+ if (strnlen(vrf_name, VRF_NAMSIZ) == 0 && bgp_next == NULL) {
+ first = true;
+ bgp_next = bgp;
+ continue;
+ }
+ if (first || strncmp(bgp->name, vrf_name, VRF_NAMSIZ) > 0) {
+ if (bgp_next == NULL)
+ bgp_next = bgp;
+ else if (strncmp(bgp->name, bgp_next->name, VRF_NAMSIZ)
+ < 0)
+ bgp_next = bgp;
+ }
+ }
+ return bgp_next;
+}
+
+/* 1.3.6.1.2.1.10.166.11.1.2.1.1.x = 14*/
+#define IFCONFTAB_NAMELEN 14
+static struct bgp *bgpL3vpnIfConf_lookup(struct variable *v, oid name[],
+ size_t *length, char *vrf_name,
+ ifindex_t *ifindex, int exact)
+{
+ struct bgp *bgp = NULL;
+ size_t namelen = v ? v->namelen : IFCONFTAB_NAMELEN;
+ struct interface *ifp;
+ int vrf_name_len, len;
+
+ /* too long ? */
+ if (*length - namelen > (VRF_NAMSIZ + sizeof(uint32_t)))
+ return NULL;
+ /* do we have index info in the oid ? */
+ if (*length - namelen != 0 && *length - namelen >= sizeof(uint32_t)) {
+ /* copy the info from the oid */
+ vrf_name_len = *length - (namelen + sizeof(ifindex_t));
+ oid2string(name + namelen, vrf_name_len, vrf_name);
+ oid2int(name + namelen + vrf_name_len, ifindex);
+ }
+
+ if (exact) {
+ /* Check the length. */
+ bgp = bgp_lookup_by_name(vrf_name);
+ if (bgp && !is_bgp_vrf_mplsvpn(bgp))
+ return NULL;
+ if (!bgp)
+ return NULL;
+ ifp = if_lookup_by_index(*ifindex, bgp->vrf_id);
+ if (!ifp)
+ return NULL;
+ } else {
+ if (strnlen(vrf_name, VRF_NAMSIZ) == 0)
+ bgp = bgp_lookup_by_name_next(vrf_name);
+ else
+ bgp = bgp_lookup_by_name(vrf_name);
+
+ while (bgp) {
+ ifp = if_vrf_lookup_by_index_next(*ifindex,
+ bgp->vrf_id);
+ if (ifp) {
+ vrf_name_len = strnlen(bgp->name, VRF_NAMSIZ);
+ *ifindex = ifp->ifindex;
+ len = vrf_name_len + sizeof(ifindex_t);
+ oid_copy_str(name + namelen, bgp->name,
+ vrf_name_len);
+ oid_copy_int(name + namelen + vrf_name_len,
+ ifindex);
+ *length = len + namelen;
+
+ return bgp;
+ }
+ *ifindex = 0;
+ bgp = bgp_lookup_by_name_next(bgp->name);
+ }
+
+ return NULL;
+ }
+ return bgp;
+}
+
+static uint8_t *mplsL3vpnIfConfTable(struct variable *v, oid name[],
+ size_t *length, int exact, size_t *var_len,
+ WriteMethod **write_method)
+{
+ char vrf_name[VRF_NAMSIZ];
+ ifindex_t ifindex = 0;
+ struct bgp *l3vpn_bgp;
+
+ if (smux_header_table(v, name, length, exact, var_len, write_method)
+ == MATCH_FAILED)
+ return NULL;
+
+ memset(vrf_name, 0, VRF_NAMSIZ);
+ l3vpn_bgp = bgpL3vpnIfConf_lookup(v, name, length, vrf_name, &ifindex,
+ exact);
+ if (!l3vpn_bgp)
+ return NULL;
+
+ switch (v->magic) {
+ case MPLSL3VPNIFVPNCLASSIFICATION:
+ return SNMP_INTEGER(2);
+ case MPLSL3VPNIFCONFSTORAGETYPE:
+ return SNMP_INTEGER(2);
+ case MPLSL3VPNIFCONFROWSTATUS:
+ return SNMP_INTEGER(1);
+ }
+ return NULL;
+}
+
+/* 1.3.6.1.2.1.10.166.11.1.2.2.1.x = 14*/
+#define VRFTAB_NAMELEN 14
+
+static struct bgp *bgpL3vpnVrf_lookup(struct variable *v, oid name[],
+ size_t *length, char *vrf_name, int exact)
+{
+ struct bgp *bgp = NULL;
+ size_t namelen = v ? v->namelen : VRFTAB_NAMELEN;
+ int len;
+
+ if (*length - namelen > VRF_NAMSIZ)
+ return NULL;
+ oid2string(name + namelen, *length - namelen, vrf_name);
+ if (exact) {
+ /* Check the length. */
+ bgp = bgp_lookup_by_name(vrf_name);
+ if (bgp && !is_bgp_vrf_mplsvpn(bgp))
+ return NULL;
+ } else {
+ bgp = bgp_lookup_by_name_next(vrf_name);
+
+ if (bgp == NULL)
+ return NULL;
+
+ len = strnlen(bgp->name, VRF_NAMSIZ);
+ oid_copy_str(name + namelen, bgp->name, len);
+ *length = len + namelen;
+ }
+ return bgp;
+}
+
+static uint8_t *mplsL3vpnVrfTable(struct variable *v, oid name[],
+ size_t *length, int exact, size_t *var_len,
+ WriteMethod **write_method)
+{
+ char vrf_name[VRF_NAMSIZ];
+ struct bgp *l3vpn_bgp;
+
+ if (smux_header_table(v, name, length, exact, var_len, write_method)
+ == MATCH_FAILED)
+ return NULL;
+
+ memset(vrf_name, 0, VRF_NAMSIZ);
+ l3vpn_bgp = bgpL3vpnVrf_lookup(v, name, length, vrf_name, exact);
+
+ if (!l3vpn_bgp)
+ return NULL;
+
+ switch (v->magic) {
+ case MPLSL3VPNVRFVPNID:
+ *var_len = 0;
+ return NULL;
+ case MPLSL3VPNVRFDESC:
+ *var_len = strnlen(l3vpn_bgp->name, VRF_NAMSIZ);
+ return (uint8_t *)l3vpn_bgp->name;
+ case MPLSL3VPNVRFRD:
+ /*
+		 * this is a horror show but the MIB dictates one RD per vrf
+ * and not one RD per AFI as we (FRR) have. So this little gem
+ * returns the V4 one if it's set OR the v6 one if it's set or
+		 * zero-length string if neither are set
+ */
+ memset(rd_buf, 0, RD_ADDRSTRLEN);
+ if (CHECK_FLAG(l3vpn_bgp->vpn_policy[AFI_IP].flags,
+ BGP_VPN_POLICY_TOVPN_RD_SET))
+ prefix_rd2str(&l3vpn_bgp->vpn_policy[AFI_IP].tovpn_rd,
+ rd_buf, sizeof(rd_buf));
+ else if (CHECK_FLAG(l3vpn_bgp->vpn_policy[AFI_IP6].flags,
+ BGP_VPN_POLICY_TOVPN_RD_SET))
+ prefix_rd2str(&l3vpn_bgp->vpn_policy[AFI_IP6].tovpn_rd,
+ rd_buf, sizeof(rd_buf));
+
+ *var_len = strnlen(rd_buf, RD_ADDRSTRLEN);
+ return (uint8_t *)rd_buf;
+ case MPLSL3VPNVRFCREATIONTIME:
+ return SNMP_INTEGER(
+ (uint32_t)l3vpn_bgp->snmp_stats->creation_time);
+ case MPLSL3VPNVRFOPERSTATUS:
+ if (l3vpn_bgp->snmp_stats->active)
+ return SNMP_INTEGER(1);
+ else
+ return SNMP_INTEGER(2);
+ case MPLSL3VPNVRFACTIVEINTERFACES:
+ return SNMP_INTEGER(bgp_vrf_interfaces(l3vpn_bgp, true));
+ case MPLSL3VPNVRFASSOCIATEDINTERFACES:
+ return SNMP_INTEGER(bgp_vrf_interfaces(l3vpn_bgp, false));
+ case MPLSL3VPNVRFCONFMIDRTETHRESH:
+ return SNMP_INTEGER(0);
+ case MPLSL3VPNVRFCONFHIGHRTETHRSH:
+ return SNMP_INTEGER(0);
+ case MPLSL3VPNVRFCONFMAXROUTES:
+ return SNMP_INTEGER(0);
+ case MPLSL3VPNVRFCONFLASTCHANGED:
+ return SNMP_INTEGER(
+ (uint32_t)l3vpn_bgp->snmp_stats->modify_time);
+ case MPLSL3VPNVRFCONFROWSTATUS:
+ return SNMP_INTEGER(1);
+ case MPLSL3VPNVRFCONFADMINSTATUS:
+ return SNMP_INTEGER(1);
+ case MPLSL3VPNVRFCONFSTORAGETYPE:
+ return SNMP_INTEGER(2);
+ }
+ return NULL;
+}
+
+/* 1.3.6.1.2.1.10.166.11.1.2.3.1.x = 14*/
+#define VRFRTTAB_NAMELEN 14
+static struct bgp *bgpL3vpnVrfRt_lookup(struct variable *v, oid name[],
+ size_t *length, char *vrf_name,
+ uint32_t *rt_index, uint8_t *rt_type,
+ int exact)
+{
+ uint32_t type_index_size;
+ struct bgp *l3vpn_bgp;
+ size_t namelen = v ? v->namelen : VRFRTTAB_NAMELEN;
+ int vrf_name_len, len;
+
+ /* too long ? */
+ if (*length - namelen
+ > (VRF_NAMSIZ + sizeof(uint32_t)) + sizeof(uint8_t))
+ return NULL;
+
+ type_index_size = sizeof(uint32_t) + sizeof(uint8_t);
+ /* do we have index info in the oid ? */
+ if (*length - namelen != 0 && *length - namelen >= type_index_size) {
+ /* copy the info from the oid */
+ vrf_name_len = *length - (namelen + type_index_size);
+ oid2string(name + namelen, vrf_name_len, vrf_name);
+ oid2int(name + namelen + vrf_name_len, (int *)rt_index);
+ *rt_type = name[namelen + vrf_name_len + sizeof(uint32_t)];
+ }
+
+ if (exact) {
+ l3vpn_bgp = bgp_lookup_by_name(vrf_name);
+ if (l3vpn_bgp && !is_bgp_vrf_mplsvpn(l3vpn_bgp))
+ return NULL;
+ if (!l3vpn_bgp)
+ return NULL;
+ /* check the index and type match up */
+		if ((*rt_index != AFI_IP) && (*rt_index != AFI_IP6))
+ return NULL;
+ /* do we have RT config */
+ if (!(l3vpn_bgp->vpn_policy[*rt_index]
+ .rtlist[BGP_VPN_POLICY_DIR_FROMVPN]
+ || l3vpn_bgp->vpn_policy[*rt_index]
+ .rtlist[BGP_VPN_POLICY_DIR_TOVPN]))
+ return NULL;
+ return l3vpn_bgp;
+ }
+ if (strnlen(vrf_name, VRF_NAMSIZ) == 0)
+ l3vpn_bgp = bgp_lookup_by_name_next(vrf_name);
+ else
+ l3vpn_bgp = bgp_lookup_by_name(vrf_name);
+ while (l3vpn_bgp) {
+ switch (*rt_index) {
+ case 0:
+ *rt_index = AFI_IP;
+ break;
+ case AFI_IP:
+ *rt_index = AFI_IP6;
+ break;
+ case AFI_IP6:
+ *rt_index = 0;
+ continue;
+ }
+ if (*rt_index) {
+ switch (*rt_type) {
+ case 0:
+ *rt_type = MPLSVPNVRFRTTYPEIMPORT;
+ break;
+ case MPLSVPNVRFRTTYPEIMPORT:
+ *rt_type = MPLSVPNVRFRTTYPEEXPORT;
+ break;
+ case MPLSVPNVRFRTTYPEEXPORT:
+ case MPLSVPNVRFRTTYPEBOTH:
+ *rt_type = 0;
+ break;
+ }
+ if (*rt_type) {
+ bool import, export;
+
+ import =
+ (!!l3vpn_bgp->vpn_policy[*rt_index].rtlist
+ [BGP_VPN_POLICY_DIR_FROMVPN]);
+ export =
+ (!!l3vpn_bgp->vpn_policy[*rt_index].rtlist
+ [BGP_VPN_POLICY_DIR_TOVPN]);
+ if (*rt_type == MPLSVPNVRFRTTYPEIMPORT
+ && !import)
+ continue;
+ if (*rt_type == MPLSVPNVRFRTTYPEEXPORT
+ && !export)
+ continue;
+				/* check for both */
+ if (*rt_type == MPLSVPNVRFRTTYPEIMPORT && import
+ && export
+ && ecommunity_cmp(
+ l3vpn_bgp->vpn_policy[*rt_index].rtlist
+ [BGP_VPN_POLICY_DIR_FROMVPN],
+ l3vpn_bgp->vpn_policy[*rt_index].rtlist
+ [BGP_VPN_POLICY_DIR_TOVPN]))
+ *rt_type = MPLSVPNVRFRTTYPEBOTH;
+
+ /* we have a match copy the oid info */
+ vrf_name_len =
+ strnlen(l3vpn_bgp->name, VRF_NAMSIZ);
+ len = vrf_name_len + sizeof(uint32_t)
+ + sizeof(uint8_t);
+ oid_copy_str(name + namelen, l3vpn_bgp->name,
+ vrf_name_len);
+ oid_copy_int(name + namelen + vrf_name_len,
+ (int *)rt_index);
+ name[(namelen + len) - 1] = *rt_type;
+ *length = len + namelen;
+ return l3vpn_bgp;
+ }
+ l3vpn_bgp = bgp_lookup_by_name_next(l3vpn_bgp->name);
+ }
+ }
+ return NULL;
+}
+
+static const char *rt_type2str(uint8_t rt_type)
+{
+ switch (rt_type) {
+ case MPLSVPNVRFRTTYPEIMPORT:
+ return "import";
+ case MPLSVPNVRFRTTYPEEXPORT:
+ return "export";
+ case MPLSVPNVRFRTTYPEBOTH:
+ return "both";
+ default:
+ return "unknown";
+ }
+}
+static uint8_t *mplsL3vpnVrfRtTable(struct variable *v, oid name[],
+ size_t *length, int exact, size_t *var_len,
+ WriteMethod **write_method)
+{
+ char vrf_name[VRF_NAMSIZ];
+ struct bgp *l3vpn_bgp;
+ uint32_t rt_index = 0;
+ uint8_t rt_type = 0;
+ char *rt_b;
+
+ if (smux_header_table(v, name, length, exact, var_len, write_method)
+ == MATCH_FAILED)
+ return NULL;
+
+ memset(vrf_name, 0, VRF_NAMSIZ);
+ l3vpn_bgp = bgpL3vpnVrfRt_lookup(v, name, length, vrf_name, &rt_index,
+ &rt_type, exact);
+
+ if (!l3vpn_bgp)
+ return NULL;
+
+ switch (v->magic) {
+ case MPLSL3VPNVRFRT:
+ switch (rt_type) {
+ case MPLSVPNVRFRTTYPEIMPORT:
+ rt_b = ecommunity_ecom2str(
+ l3vpn_bgp->vpn_policy[rt_index]
+ .rtlist[BGP_VPN_POLICY_DIR_FROMVPN],
+ ECOMMUNITY_FORMAT_ROUTE_MAP,
+ ECOMMUNITY_ROUTE_TARGET);
+ break;
+ case MPLSVPNVRFRTTYPEEXPORT:
+ case MPLSVPNVRFRTTYPEBOTH:
+ rt_b = ecommunity_ecom2str(
+ l3vpn_bgp->vpn_policy[rt_index]
+ .rtlist[BGP_VPN_POLICY_DIR_TOVPN],
+ ECOMMUNITY_FORMAT_ROUTE_MAP,
+ ECOMMUNITY_ROUTE_TARGET);
+ break;
+ default:
+ rt_b = NULL;
+ break;
+ }
+ if (rt_b)
+ *var_len = strnlen(rt_b, ECOMMUNITY_STRLEN);
+ else
+ *var_len = 0;
+ return (uint8_t *)rt_b;
+ case MPLSL3VPNVRFRTDESCR:
+		/* since we don't have a description, generate one */
+ memset(rt_description, 0, VRF_NAMSIZ + RT_PREAMBLE_SIZE);
+ snprintf(rt_description, VRF_NAMSIZ + RT_PREAMBLE_SIZE,
+ "RT %s for VRF %s", rt_type2str(rt_type),
+ l3vpn_bgp->name);
+ *var_len =
+ strnlen(rt_description, VRF_NAMSIZ + RT_PREAMBLE_SIZE);
+ return (uint8_t *)rt_description;
+ case MPLSL3VPNVRFRTROWSTATUS:
+ return SNMP_INTEGER(1);
+ case MPLSL3VPNVRFRTSTORAGETYPE:
+ return SNMP_INTEGER(2);
+ }
+ return NULL;
+}
+
+/* 1.3.6.1.2.1.10.166.11.1.3.1.1.x = 14*/
+#define PERFTAB_NAMELEN 14
+
+/*
+ * Handler for the mplsL3vpnPerfTable columns: per-VRF route counters
+ * (routes added, routes deleted, current number of routes).
+ *
+ * The VRF is identified by the name encoded in the request OID and is
+ * resolved through bgpL3vpnVrf_lookup(), which also rewrites the OID on
+ * GETNEXT walks.  Returns a pointer to a static SNMP value buffer, or
+ * NULL when the OID matches no VRF or column.
+ */
+static uint8_t *mplsL3vpnPerfTable(struct variable *v, oid name[],
+				   size_t *length, int exact, size_t *var_len,
+				   WriteMethod **write_method)
+{
+	char vrf_name[VRF_NAMSIZ];
+	struct bgp *l3vpn_bgp;
+
+	if (smux_header_table(v, name, length, exact, var_len, write_method)
+	    == MATCH_FAILED)
+		return NULL;
+
+	memset(vrf_name, 0, VRF_NAMSIZ);
+	/* decode the VRF-name index from the OID and find the instance */
+	l3vpn_bgp = bgpL3vpnVrf_lookup(v, name, length, vrf_name, exact);
+
+	if (!l3vpn_bgp)
+		return NULL;
+
+	/* NOTE(review): assumes l3vpn_bgp->snmp_stats was allocated (by the
+	 * bgp_snmp_init_stats hook) for every instance returned here — confirm
+	 */
+	switch (v->magic) {
+	case MPLSL3VPNVRFPERFROUTESADDED:
+		return SNMP_INTEGER(l3vpn_bgp->snmp_stats->routes_added);
+	case MPLSL3VPNVRFPERFROUTESDELETED:
+		return SNMP_INTEGER(l3vpn_bgp->snmp_stats->routes_deleted);
+	case MPLSL3VPNVRFPERFCURRNUMROUTES:
+		return SNMP_INTEGER(bgp_mpls_l3vpn_current_routes(l3vpn_bgp));
+	}
+	return NULL;
+}
+
+/*
+ * Exact-match lookup of a route in @l3vpn_bgp's unicast RIB.
+ *
+ * On success *dest is set to the matching table node and the path whose
+ * nexthop equals @nexthop is returned.  With an unknown nexthop address
+ * type the first path of the node is returned.  @policy is accepted for
+ * signature symmetry with the GETNEXT helper but is not used here.
+ *
+ * NOTE(review): bgp_node_lookup() returns a locked dest; no unlock is
+ * visible on the no-matching-path exit — confirm who is responsible for
+ * dropping the node lock.
+ */
+static struct bgp_path_info *
+bgp_lookup_route(struct bgp *l3vpn_bgp, struct bgp_dest **dest,
+		 struct prefix *prefix, uint16_t policy, struct ipaddr *nexthop)
+{
+	struct bgp_path_info *pi = NULL;
+	struct bgp_table *table;
+
+	switch (prefix->family) {
+	case AF_INET:
+		table = l3vpn_bgp->rib[AFI_IP][SAFI_UNICAST];
+		break;
+	case AF_INET6:
+		table = l3vpn_bgp->rib[AFI_IP6][SAFI_UNICAST];
+		break;
+	default:
+		return NULL;
+	}
+
+	/* get the prefix */
+	*dest = bgp_node_lookup(table, prefix);
+	if (*dest == NULL)
+		return NULL;
+
+	/* now find the right path by matching the nexthop */
+	pi = bgp_dest_get_bgp_path_info(*dest);
+	for (; pi; pi = pi->next) {
+		switch (nexthop->ipa_type) {
+		case IPADDR_V4:
+			if (nexthop->ip._v4_addr.s_addr
+			    == pi->attr->nexthop.s_addr)
+				return pi;
+			break;
+		case IPADDR_V6:
+			if (memcmp(&nexthop->ip._v6_addr,
+				   &pi->attr->mp_nexthop_global,
+				   sizeof(struct in6_addr))
+			    == 0)
+				return pi;
+			break;
+		default:
+			/* unknown nexthop type: accept the first path */
+			return pi;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * GETNEXT helper: find the route/path that follows @prefix/@nexthop.
+ *
+ * A zero prefixlen means "start of walk": the first path of the VRF's
+ * IPv4 unicast RIB is returned, falling back to the IPv6 RIB.  Otherwise
+ * the current entry is located with an exact lookup and the walk
+ * continues with, in order: the next multipath entry of the same route,
+ * the next route in the same table, the IPv6 table, and finally the next
+ * bgp instance (updating *l3vpn_bgp).
+ *
+ * *policy is incremented for additional paths of the same route so the
+ * rewritten OID stays strictly increasing across multipath entries.
+ *
+ * NOTE(review): instances returned by bgp_lookup_by_name_next() are not
+ * filtered with is_bgp_vrf_mplsvpn() here — confirm whether non-MPLSVPN
+ * VRFs can leak into the walk.
+ */
+static struct bgp_path_info *bgp_lookup_route_next(struct bgp **l3vpn_bgp,
+						   struct bgp_dest **dest,
+						   struct prefix *prefix,
+						   uint16_t *policy,
+						   struct ipaddr *nexthop)
+{
+	struct bgp_path_info *pi = NULL;
+	struct bgp_table *table;
+	const struct prefix *p;
+	uint8_t family;
+
+	/* First route?*/
+	if (prefix->prefixlen == 0) {
+		/* try V4 table */
+		table = (*l3vpn_bgp)->rib[AFI_IP][SAFI_UNICAST];
+		for (*dest = bgp_table_top(table); *dest;
+		     *dest = bgp_route_next(*dest)) {
+			pi = bgp_dest_get_bgp_path_info(*dest);
+			if (pi)
+				break;
+		}
+
+		if (!pi) {
+			/* try V6 table */
+			table = (*l3vpn_bgp)->rib[AFI_IP6][SAFI_UNICAST];
+			for (*dest = bgp_table_top(table); *dest;
+			     *dest = bgp_route_next(*dest)) {
+				pi = bgp_dest_get_bgp_path_info(*dest);
+				if (pi)
+					break;
+			}
+		}
+		return pi;
+	}
+	/* real next search for the entry first use exact lookup */
+	pi = bgp_lookup_route(*l3vpn_bgp, dest, prefix, *policy, nexthop);
+
+	if (pi == NULL)
+		return NULL;
+
+	p = bgp_dest_get_prefix(*dest);
+	family = p->family;
+
+	/* We have found the input path let's find the next one in the list */
+	if (pi->next) {
+		/* ensure OID is always higher for multipath routes by
+		 * incrementing opaque policy oid
+		 */
+		*policy += 1;
+		return pi->next;
+	}
+
+	/* No more paths in the input route so find the next route */
+	for (; *l3vpn_bgp;
+	     *l3vpn_bgp = bgp_lookup_by_name_next((*l3vpn_bgp)->name)) {
+		*policy = 0;
+		if (!*dest) {
+			/* fresh instance: restart at the top of its v4 RIB */
+			table = (*l3vpn_bgp)->rib[AFI_IP][SAFI_UNICAST];
+			*dest = bgp_table_top(table);
+			family = AF_INET;
+		} else
+			*dest = bgp_route_next(*dest);
+
+		while (true) {
+			for (; *dest; *dest = bgp_route_next(*dest)) {
+				pi = bgp_dest_get_bgp_path_info(*dest);
+
+				if (pi)
+					return pi;
+			}
+			/* v4 table exhausted: roll over to the v6 table once */
+			if (family == AF_INET) {
+				table = (*l3vpn_bgp)
+						->rib[AFI_IP6][SAFI_UNICAST];
+				*dest = bgp_table_top(table);
+				family = AF_INET6;
+				continue;
+			}
+			break;
+		}
+	}
+
+	return NULL;
+}
+
+/* Return true when the OID sub-identifier encodes an InetAddressType
+ * value understood by this MIB (unknown, ipv4 or ipv6).
+ */
+static bool is_addr_type(oid id)
+{
+	return (id == INETADDRESSTYPEUNKNOWN || id == INETADDRESSTYPEIPV4
+		|| id == INETADDRESSTYPEIPV6);
+}
+
+/* 1.3.6.1.2.1.10.166.11.1.4.1.1.x = 14 */
+/* Renamed from PERFTAB_NAMELEN: that macro is already defined above for
+ * the perf table, and redefining it here was a copy-paste smell (benign
+ * only while both values stay equal).  This is the Rte table's OID
+ * prefix length.
+ */
+#define RTETAB_NAMELEN 14
+
+/*
+ * Resolve an mplsL3vpnVrfRteTable request OID to a bgp_path_info.
+ *
+ * The table index is: VRF name, inet address type, destination prefix,
+ * prefix length, a 2-sub-id opaque "policy" (used here to keep multipath
+ * OIDs unique), nexthop address type and nexthop address.
+ *
+ * For exact requests the route is looked up directly; for GETNEXT the
+ * successor path is found and the request OID in @name/@length is
+ * rewritten to that path's full index.
+ *
+ * Outputs: *l3vpn_bgp, *dest and *policy describe the returned path.
+ * Returns NULL when nothing matches.
+ */
+static struct bgp_path_info *bgpL3vpnRte_lookup(struct variable *v, oid name[],
+						size_t *length, char *vrf_name,
+						struct bgp **l3vpn_bgp,
+						struct bgp_dest **dest,
+						uint16_t *policy, int exact)
+{
+	uint8_t i;
+	uint8_t vrf_name_len = 0;
+	struct bgp_path_info *pi = NULL;
+	size_t namelen = v ? v->namelen : IFCONFTAB_NAMELEN;
+	struct prefix prefix = {0};
+	struct ipaddr nexthop = {0};
+	uint8_t prefix_type;
+	uint8_t nexthop_type;
+
+	/* index can be at most the vrf name plus 37 sub-ids of address,
+	 * length and policy data
+	 */
+	if ((uint32_t)(*length - namelen) > (VRF_NAMSIZ + 37))
+		return NULL;
+
+	if (*length - namelen != 0) {
+		/* parse incoming OID */
+		for (i = namelen; i < (*length); i++) {
+			if (is_addr_type(name[i]))
+				break;
+			vrf_name_len++;
+		}
+		if (vrf_name_len > VRF_NAMSIZ)
+			return NULL;
+
+		oid2string(name + namelen, vrf_name_len, vrf_name);
+		prefix_type = name[i++];
+		switch (prefix_type) {
+		case INETADDRESSTYPEUNKNOWN:
+			prefix.family = AF_UNSPEC;
+			break;
+		case INETADDRESSTYPEIPV4:
+			prefix.family = AF_INET;
+			oid2in_addr(&name[i], sizeof(struct in_addr),
+				    &prefix.u.prefix4);
+			i += sizeof(struct in_addr);
+			break;
+		case INETADDRESSTYPEIPV6:
+			prefix.family = AF_INET6;
+			/* prefix4/prefix6 share storage in the union */
+			oid2in_addr(&name[i], sizeof(struct in6_addr),
+				    &prefix.u.prefix4); /* sic */
+			i += sizeof(struct in6_addr);
+			break;
+		}
+		prefix.prefixlen = (uint8_t)name[i++];
+		/* opaque policy index: two sub-ids, high byte first */
+		*policy |= name[i++] << 8;
+		*policy |= name[i++];
+		nexthop_type = name[i++];
+		switch (nexthop_type) {
+		case INETADDRESSTYPEUNKNOWN:
+			nexthop.ipa_type = (prefix.family == AF_INET)
+						   ? IPADDR_V4
+						   : IPADDR_V6;
+			break;
+		case INETADDRESSTYPEIPV4:
+			nexthop.ipa_type = IPADDR_V4;
+			oid2in_addr(&name[i], sizeof(struct in_addr),
+				    &nexthop.ip._v4_addr);
+			/* i += sizeof(struct in_addr); */
+			break;
+		case INETADDRESSTYPEIPV6:
+			nexthop.ipa_type = IPADDR_V6;
+			oid2in_addr(&name[i], sizeof(struct in6_addr),
+				    &nexthop.ip._v4_addr); /* sic */
+			/* i += sizeof(struct in6_addr); */
+			break;
+		}
+	}
+
+	if (exact) {
+		*l3vpn_bgp = bgp_lookup_by_name(vrf_name);
+		if (*l3vpn_bgp && !is_bgp_vrf_mplsvpn(*l3vpn_bgp))
+			return NULL;
+		if (*l3vpn_bgp == NULL)
+			return NULL;
+
+		/* now lookup the route in this bgp table */
+		pi = bgp_lookup_route(*l3vpn_bgp, dest, &prefix, *policy,
+				      &nexthop);
+	} else {
+		int str_len;
+
+		str_len = strnlen(vrf_name, VRF_NAMSIZ);
+		if (str_len == 0) {
+			/* walk is at the table start: take the first vrf */
+			*l3vpn_bgp = bgp_lookup_by_name_next(vrf_name);
+		} else
+			/* otherwise lookup the one we have */
+			*l3vpn_bgp = bgp_lookup_by_name(vrf_name);
+
+		/* Fix: test the looked-up instance, not the out-parameter
+		 * pointer (which is never NULL).  Without this, the end of
+		 * a walk handed a NULL instance to bgp_lookup_route_next()
+		 * which dereferences it.
+		 * NOTE(review): unlike the exact path there is still no
+		 * is_bgp_vrf_mplsvpn() filter here — confirm.
+		 */
+		if (*l3vpn_bgp == NULL)
+			return NULL;
+
+		pi = bgp_lookup_route_next(l3vpn_bgp, dest, &prefix, policy,
+					   &nexthop);
+		if (pi) {
+			/* renamed from vrf_name_len so it no longer shadows
+			 * the parse counter declared above
+			 */
+			uint8_t found_name_len =
+				strnlen((*l3vpn_bgp)->name, VRF_NAMSIZ);
+			const struct prefix *p = bgp_dest_get_prefix(*dest);
+			uint8_t oid_index;
+			bool v4 = (p->family == AF_INET);
+			uint8_t addr_len = v4 ? sizeof(struct in_addr)
+					      : sizeof(struct in6_addr);
+			struct attr *attr = pi->attr;
+
+			/* copy the index parameters */
+			oid_copy_str(&name[namelen], (*l3vpn_bgp)->name,
+				     found_name_len);
+			oid_index = namelen + found_name_len;
+			name[oid_index++] =
+				v4 ? INETADDRESSTYPEIPV4 : INETADDRESSTYPEIPV6;
+			oid_copy_addr(&name[oid_index], &p->u.prefix4,
+				      addr_len);
+			oid_index += addr_len;
+			name[oid_index++] = p->prefixlen;
+			name[oid_index++] = *policy >> 8;
+			name[oid_index++] = *policy & 0xff;
+
+			if (!BGP_ATTR_NEXTHOP_AFI_IP6(attr)) {
+				if (attr->nexthop.s_addr == INADDR_ANY)
+					name[oid_index++] =
+						INETADDRESSTYPEUNKNOWN;
+				else {
+					name[oid_index++] = INETADDRESSTYPEIPV4;
+					oid_copy_addr(&name[oid_index],
+						      &attr->nexthop,
+						      sizeof(struct in_addr));
+					oid_index += sizeof(struct in_addr);
+				}
+			} else {
+				if (IN6_IS_ADDR_UNSPECIFIED(
+					    &attr->mp_nexthop_global))
+					name[oid_index++] =
+						INETADDRESSTYPEUNKNOWN;
+				else {
+					name[oid_index++] = INETADDRESSTYPEIPV6;
+					oid_copy_addr(
+						&name[oid_index],
+						(struct in_addr *)&attr
+							->mp_nexthop_global,
+						sizeof(struct in6_addr));
+					oid_index += sizeof(struct in6_addr);
+				}
+			}
+			/* hand the rewritten successor OID back to snmp */
+			*length = oid_index;
+		}
+	}
+	return pi;
+}
+
+/*
+ * Handler for the mplsL3vpnVrfRteTable columns: per-route attributes of
+ * a VRF's unicast RIB entries (destination, nexthop, route type,
+ * protocol, metrics, ...).  The route is resolved from the request OID
+ * by bgpL3vpnRte_lookup(), which also rewrites the OID on GETNEXT.
+ */
+static uint8_t *mplsL3vpnRteTable(struct variable *v, oid name[],
+				  size_t *length, int exact, size_t *var_len,
+				  WriteMethod **write_method)
+{
+	char vrf_name[VRF_NAMSIZ];
+	struct bgp *l3vpn_bgp;
+	struct bgp_dest *dest;
+	struct bgp_path_info *pi;
+	const struct prefix *p;
+	uint16_t policy = 0;
+
+	if (smux_header_table(v, name, length, exact, var_len, write_method)
+	    == MATCH_FAILED)
+		return NULL;
+
+	memset(vrf_name, 0, VRF_NAMSIZ);
+	pi = bgpL3vpnRte_lookup(v, name, length, vrf_name, &l3vpn_bgp, &dest,
+				&policy, exact);
+
+
+	if (!pi)
+		return NULL;
+
+	p = bgp_dest_get_prefix(dest);
+
+	if (!p)
+		return NULL;
+
+	switch (v->magic) {
+	case MPLSL3VPNVRFRTEINETCIDRDESTTYPE:
+		switch (p->family) {
+		case AF_INET:
+			return SNMP_INTEGER(INETADDRESSTYPEIPV4);
+		case AF_INET6:
+			return SNMP_INTEGER(INETADDRESSTYPEIPV6);
+		default:
+			return SNMP_INTEGER(INETADDRESSTYPEUNKNOWN);
+		}
+	case MPLSL3VPNVRFRTEINETCIDRDEST:
+		switch (p->family) {
+		case AF_INET:
+			return SNMP_IPADDRESS(p->u.prefix4);
+		case AF_INET6:
+			return SNMP_IP6ADDRESS(p->u.prefix6);
+		default:
+			*var_len = 0;
+			return NULL;
+		}
+	case MPLSL3VPNVRFRTEINETCIDRPFXLEN:
+		return SNMP_INTEGER(p->prefixlen);
+	case MPLSL3VPNVRFRTEINETCIDRPOLICY:
+		/* opaque 2-sub-id policy kept to make multipath OIDs unique */
+		*var_len = sizeof(mpls_l3vpn_policy_oid);
+		mpls_l3vpn_policy_oid[0] = policy >> 8;
+		mpls_l3vpn_policy_oid[1] = policy & 0xff;
+		return (uint8_t *)mpls_l3vpn_policy_oid;
+	case MPLSL3VPNVRFRTEINETCIDRNHOPTYPE:
+		if (!BGP_ATTR_NEXTHOP_AFI_IP6(pi->attr)) {
+			if (pi->attr->nexthop.s_addr == INADDR_ANY)
+				return SNMP_INTEGER(INETADDRESSTYPEUNKNOWN);
+			else
+				return SNMP_INTEGER(INETADDRESSTYPEIPV4);
+		} else if (IN6_IS_ADDR_UNSPECIFIED(
+				   &pi->attr->mp_nexthop_global))
+			return SNMP_INTEGER(INETADDRESSTYPEUNKNOWN);
+		else
+			return SNMP_INTEGER(INETADDRESSTYPEIPV6);
+
+	case MPLSL3VPNVRFRTEINETCIDRNEXTHOP:
+		if (!BGP_ATTR_NEXTHOP_AFI_IP6(pi->attr))
+			if (pi->attr->nexthop.s_addr == INADDR_ANY) {
+				*var_len = 0;
+				return (uint8_t *)empty_nhop;
+			} else
+				return SNMP_IPADDRESS(pi->attr->nexthop);
+		else if (IN6_IS_ADDR_UNSPECIFIED(
+				 &pi->attr->mp_nexthop_global)) {
+			*var_len = 0;
+			return (uint8_t *)empty_nhop;
+		} else
+			return SNMP_IP6ADDRESS(pi->attr->mp_nexthop_global);
+
+	case MPLSL3VPNVRFRTEINETCIDRIFINDEX:
+		if (pi->nexthop && pi->nexthop->nexthop)
+			return SNMP_INTEGER(pi->nexthop->nexthop->ifindex);
+		else
+			return SNMP_INTEGER(0);
+	case MPLSL3VPNVRFRTEINETCIDRTYPE:
+		/* map the resolved zebra nexthop type onto the MIB values */
+		if (pi->nexthop && pi->nexthop->nexthop) {
+			switch (pi->nexthop->nexthop->type) {
+			case NEXTHOP_TYPE_IFINDEX:
+				return SNMP_INTEGER(
+					MPLSL3VPNVRFRTECIDRTYPELOCAL);
+			case NEXTHOP_TYPE_IPV4:
+			case NEXTHOP_TYPE_IPV4_IFINDEX:
+			case NEXTHOP_TYPE_IPV6:
+			case NEXTHOP_TYPE_IPV6_IFINDEX:
+				return SNMP_INTEGER(
+					MPLSL3VPNVRFRTECIDRTYPEREMOTE);
+			case NEXTHOP_TYPE_BLACKHOLE:
+				switch (pi->nexthop->nexthop->bh_type) {
+				case BLACKHOLE_REJECT:
+					return SNMP_INTEGER(
+						MPLSL3VPNVRFRTECIDRTYPEREJECT);
+				default:
+					return SNMP_INTEGER(
+						MPLSL3VPNVRFRTECIDRTYPEBLACKHOLE);
+				}
+			default:
+				return SNMP_INTEGER(
+					MPLSL3VPNVRFRTECIDRTYPEOTHER);
+			}
+		} else
+			return SNMP_INTEGER(MPLSL3VPNVRFRTECIDRTYPEOTHER);
+	case MPLSL3VPNVRFRTEINETCIDRPROTO:
+		switch (pi->type) {
+		case ZEBRA_ROUTE_CONNECT:
+			return SNMP_INTEGER(IANAIPROUTEPROTOCOLLOCAL);
+		case ZEBRA_ROUTE_STATIC:
+			return SNMP_INTEGER(IANAIPROUTEPROTOCOLNETMGMT);
+		case ZEBRA_ROUTE_RIP:
+		case ZEBRA_ROUTE_RIPNG:
+			return SNMP_INTEGER(IANAIPROUTEPROTOCOLRIP);
+		case ZEBRA_ROUTE_OSPF:
+		case ZEBRA_ROUTE_OSPF6:
+			return SNMP_INTEGER(IANAIPROUTEPROTOCOLOSPF);
+		case ZEBRA_ROUTE_ISIS:
+			return SNMP_INTEGER(IANAIPROUTEPROTOCOLISIS);
+		case ZEBRA_ROUTE_BGP:
+			return SNMP_INTEGER(IANAIPROUTEPROTOCOLBGP);
+		case ZEBRA_ROUTE_EIGRP:
+			return SNMP_INTEGER(IANAIPROUTEPROTOCOLCISCOEIGRP);
+		default:
+			return SNMP_INTEGER(IANAIPROUTEPROTOCOLOTHER);
+		}
+	case MPLSL3VPNVRFRTEINETCIDRAGE:
+		/* NOTE(review): this returns the path's uptime stamp, not an
+		 * elapsed age — confirm against the MIB's CidrAge semantics
+		 */
+		return SNMP_INTEGER(pi->uptime);
+	case MPLSL3VPNVRFRTEINETCIDRNEXTHOPAS:
+		return SNMP_INTEGER(pi->peer ? pi->peer->as : 0);
+	case MPLSL3VPNVRFRTEINETCIDRMETRIC1:
+		return SNMP_INTEGER(pi->extra ? pi->extra->igpmetric : 0);
+	case MPLSL3VPNVRFRTEINETCIDRMETRIC2:
+		return SNMP_INTEGER(-1);
+	case MPLSL3VPNVRFRTEINETCIDRMETRIC3:
+		return SNMP_INTEGER(-1);
+	case MPLSL3VPNVRFRTEINETCIDRMETRIC4:
+		return SNMP_INTEGER(-1);
+	case MPLSL3VPNVRFRTEINETCIDRMETRIC5:
+		return SNMP_INTEGER(-1);
+	case MPLSL3VPNVRFRTEINETCIDRXCPOINTER:
+		return SNMP_OCTET(0);
+	case MPLSL3VPNVRFRTEINETCIDRSTATUS:
+		/* active(1) */
+		return SNMP_INTEGER(1);
+	}
+	return NULL;
+}
+
+/*
+ * Module entry point: register the bgp hooks that keep the per-VRF SNMP
+ * statistics current, then install the MPLS-L3VPN MIB with the agent.
+ */
+void bgp_mpls_l3vpn_module_init(void)
+{
+	hook_register(bgp_vrf_status_changed, bgp_vrf_check_update_active);
+	hook_register(bgp_snmp_init_stats, bgp_init_snmp_stats);
+	hook_register(bgp_snmp_update_last_changed,
+		      bgp_mpls_l3vpn_update_last_changed);
+	hook_register(bgp_snmp_update_stats, bgp_snmp_update_route_stats);
+	REGISTER_MIB("mplsL3VpnMIB", mpls_l3vpn_variables, variable,
+		     mpls_l3vpn_oid);
+}
diff --git a/bgpd/bgp_mplsvpn_snmp.h b/bgpd/bgp_mplsvpn_snmp.h
new file mode 100644
index 0000000000..781d5e98f6
--- /dev/null
+++ b/bgpd/bgp_mplsvpn_snmp.h
@@ -0,0 +1,31 @@
+/* MPLS/BGP L3VPN MIB
+ * Copyright (C) 2020 Volta Networks Inc
+ *
+ * This file is part of FRR.
+ *
+ * FRRouting is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * FRRouting is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Register the MPLS-L3VPN MIB handlers and stats hooks with bgpd. */
+void bgp_mpls_l3vpn_module_init(void);
+
+/* mplsL3VpnVrfRteInetCidrType values returned for a route's type */
+#define MPLSL3VPNVRFRTECIDRTYPEOTHER 1
+#define MPLSL3VPNVRFRTECIDRTYPEREJECT 2
+#define MPLSL3VPNVRFRTECIDRTYPELOCAL 3
+#define MPLSL3VPNVRFRTECIDRTYPEREMOTE 4
+#define MPLSL3VPNVRFRTECIDRTYPEBLACKHOLE 5
+
+/* route-target type values: import(1), export(2), both(3) */
+#define MPLSVPNVRFRTTYPEIMPORT 1
+#define MPLSVPNVRFRTTYPEEXPORT 2
+#define MPLSVPNVRFRTTYPEBOTH 3
diff --git a/bgpd/bgp_nb.c b/bgpd/bgp_nb.c
index 08ec64242d..f65a4be677 100644
--- a/bgpd/bgp_nb.c
+++ b/bgpd/bgp_nb.c
@@ -3092,6 +3092,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/add-paths/path-type",
.cbs = {
.modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify,
@@ -3324,6 +3394,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/add-paths/path-type",
.cbs = {
.modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify,
@@ -3556,6 +3696,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/add-paths/path-type",
.cbs = {
.modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify,
@@ -3788,6 +3998,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/add-paths/path-type",
.cbs = {
.modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify,
@@ -4020,6 +4300,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/add-paths/path-type",
.cbs = {
.modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_add_paths_path_type_modify,
@@ -4212,6 +4562,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/add-paths/path-type",
.cbs = {
.modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_add_paths_path_type_modify,
@@ -4404,6 +4824,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/as-path-options/allow-own-as",
.cbs = {
.modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_as_path_options_allow_own_as_modify,
@@ -4472,6 +4962,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/route-reflector/route-reflector-client",
.cbs = {
.modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_route_reflector_route_reflector_client_modify,
@@ -4490,6 +5050,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/route-reflector/route-reflector-client",
.cbs = {
.modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify,
@@ -4508,6 +5138,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-unicast/add-paths/path-type",
.cbs = {
.modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_add_paths_path_type_modify,
@@ -5048,6 +5748,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/add-paths/path-type",
.cbs = {
.modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify,
@@ -5279,6 +6049,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/add-paths/path-type",
.cbs = {
.modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify,
@@ -5510,6 +6350,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/add-paths/path-type",
.cbs = {
.modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify,
@@ -5742,6 +6652,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/add-paths/path-type",
.cbs = {
.modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify,
@@ -6444,6 +7424,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/route-reflector/route-reflector-client",
.cbs = {
.modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify,
@@ -6462,6 +7512,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-unicast/add-paths/path-type",
.cbs = {
.modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_add_paths_path_type_modify,
@@ -7002,6 +8122,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/add-paths/path-type",
.cbs = {
.modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify,
@@ -7240,6 +8430,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/add-paths/path-type",
.cbs = {
.modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify,
@@ -7478,6 +8738,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/add-paths/path-type",
.cbs = {
.modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify,
@@ -7710,6 +9040,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/add-paths/path-type",
.cbs = {
.modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify,
@@ -7942,6 +9342,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/add-paths/path-type",
.cbs = {
.modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_add_paths_path_type_modify,
@@ -8134,6 +9604,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/add-paths/path-type",
.cbs = {
.modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_add_paths_path_type_modify,
@@ -8326,6 +9866,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l2vpn-evpn/as-path-options/allow-own-as",
.cbs = {
.modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l2vpn_evpn_as_path_options_allow_own_as_modify,
@@ -8412,6 +10022,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/route-reflector/route-reflector-client",
.cbs = {
.modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify,
@@ -8430,6 +10110,76 @@ const struct frr_yang_module_info frr_bgp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-import",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-export",
+ .cbs = {
+ .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify,
+ .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy,
+ }
+ },
+ {
.xpath = NULL,
},
}
diff --git a/bgpd/bgp_nb.h b/bgpd/bgp_nb.h
index 9c81c2457e..eb7725d3dd 100644
--- a/bgpd/bgp_nb.h
+++ b/bgpd/bgp_nb.h
@@ -1329,6 +1329,46 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_soft_reconfiguration_
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_weight_weight_attribute_modify(
struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify(
@@ -1431,6 +1471,46 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_weight_weight_attri
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_as_path_options_allow_own_as_modify(
@@ -1531,6 +1611,46 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_weight_weight_attri
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_as_path_options_allow_own_as_modify(
@@ -1631,6 +1751,46 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_weight_weight
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_as_path_options_allow_own_as_modify(
@@ -1731,6 +1891,46 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_weight_weight
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_as_path_options_allow_own_as_modify(
@@ -1813,6 +2013,46 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_weight_weight_a
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_as_path_options_allow_own_as_modify(
@@ -1895,6 +2135,46 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_weight_weight_a
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_as_path_options_allow_own_as_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_as_path_options_allow_own_as_destroy(
@@ -1921,18 +2201,138 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_route_server_route_serv
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_soft_reconfiguration_modify(
struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_route_reflector_route_reflector_client_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_route_server_route_server_client_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_soft_reconfiguration_modify(
struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_route_server_route_server_client_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_soft_reconfiguration_modify(
struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_as_path_options_allow_own_as_modify(
@@ -2175,6 +2575,46 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_weight_wei
struct nb_cb_modify_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_as_path_options_allow_own_as_modify(
@@ -2275,6 +2715,46 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_weight_w
struct nb_cb_modify_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_as_path_options_allow_own_as_modify(
@@ -2375,6 +2855,46 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_weight_w
struct nb_cb_modify_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_as_path_options_allow_own_as_modify(
@@ -2475,6 +2995,46 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_we
struct nb_cb_modify_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_as_path_options_allow_own_as_modify(
@@ -2771,12 +3331,92 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_route_ser
struct nb_cb_modify_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_soft_reconfiguration_modify(
struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_route_server_route_server_client_modify(
struct nb_cb_modify_args *args);
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_soft_reconfiguration_modify(
struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_as_path_options_allow_own_as_modify(
@@ -3019,6 +3659,46 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_weight_weight_att
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_as_path_options_allow_own_as_modify(
@@ -3119,6 +3799,46 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_weight_weight_a
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_as_path_options_allow_own_as_modify(
@@ -3219,6 +3939,46 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_weight_weight_a
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_as_path_options_allow_own_as_modify(
@@ -3319,6 +4079,46 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_weight_we
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_as_path_options_allow_own_as_modify(
@@ -3419,6 +4219,46 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_weight_we
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_as_path_options_allow_own_as_modify(
@@ -3501,6 +4341,46 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_weight_weig
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_add_paths_path_type_modify(
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_as_path_options_allow_own_as_modify(
@@ -3583,6 +4463,46 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_weight_weig
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_weight_weight_attribute_destroy(
struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_l2vpn_evpn_as_path_options_allow_own_as_modify(
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_l2vpn_evpn_as_path_options_allow_own_as_destroy(
@@ -3615,12 +4535,92 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_route_server_rou
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_soft_reconfiguration_modify(
struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify(
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_route_server_route_server_client_modify(
struct nb_cb_modify_args *args);
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_soft_reconfiguration_modify(
struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args);
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args);
/*
* Callback registered with routing_nb lib to validate only
diff --git a/bgpd/bgp_nb_config.c b/bgpd/bgp_nb_config.c
index f6a138ba45..f2443bd164 100644
--- a/bgpd/bgp_nb_config.c
+++ b/bgpd/bgp_nb_config.c
@@ -32,6 +32,8 @@
#include "bgpd/bgp_io.h"
#include "bgpd/bgp_damp.h"
+DEFINE_HOOK(bgp_snmp_init_stats, (struct bgp *bgp), (bgp))
+
FRR_CFG_DEFAULT_ULONG(BGP_CONNECT_RETRY,
{ .val_ulong = 10, .match_profile = "datacenter", },
{ .val_ulong = 120 },
@@ -9862,6 +9864,7 @@ static int bgp_global_afi_safi_ip_unicast_vpn_config_import_export_vpn_modify(
UNSET_FLAG(bgp->af_flags[afi][safi], flag);
}
+ hook_call(bgp_snmp_init_stats, bgp);
return NB_OK;
}
@@ -15124,6 +15127,66 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_orf_capability_orf_bo
return NB_OK;
}
+static int bgp_neighbor_afi_safi_rmap_modify(struct nb_cb_modify_args *args,
+ int direct)
+{
+ struct bgp *bgp;
+ const char *peer_str;
+ struct peer *peer;
+ const struct lyd_node *nbr_dnode;
+ const struct lyd_node *nbr_af_dnode;
+ const char *af_name;
+ afi_t afi;
+ safi_t safi;
+ const char *name_str;
+ struct route_map *route_map;
+ int ret;
+
+ nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi");
+ af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name");
+ yang_afi_safi_identity2value(af_name, &afi, &safi);
+
+ nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "neighbor");
+ bgp = nb_running_get_entry(nbr_dnode, NULL, true);
+ peer_str = yang_dnode_get_string(nbr_dnode, "./remote-address");
+ peer = bgp_neighbor_peer_lookup(bgp, peer_str, args->errmsg,
+ args->errmsg_len);
+
+ name_str = yang_dnode_get_string(args->dnode, NULL);
+ route_map = route_map_lookup_by_name(name_str);
+ ret = peer_route_map_set(peer, afi, safi, direct, name_str, route_map);
+
+ return bgp_nb_errmsg_return(args->errmsg, args->errmsg_len, ret);
+}
+
+static int bgp_neighbor_afi_safi_rmap_destroy(struct nb_cb_destroy_args *args,
+ int direct)
+{
+ struct bgp *bgp;
+ const char *peer_str;
+ struct peer *peer;
+ const struct lyd_node *nbr_dnode;
+ const struct lyd_node *nbr_af_dnode;
+ const char *af_name;
+ afi_t afi;
+ safi_t safi;
+ int ret;
+
+ nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi");
+ af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name");
+ yang_afi_safi_identity2value(af_name, &afi, &safi);
+
+ nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "neighbor");
+ bgp = nb_running_get_entry(nbr_dnode, NULL, true);
+ peer_str = yang_dnode_get_string(nbr_dnode, "./remote-address");
+ peer = bgp_neighbor_peer_lookup(bgp, peer_str, args->errmsg,
+ args->errmsg_len);
+
+ ret = peer_route_map_unset(peer, afi, safi, direct);
+
+ return bgp_nb_errmsg_return(args->errmsg, args->errmsg_len, ret);
+}
+
/*
* XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-unicast/filter-config/rmap-import
@@ -15135,9 +15198,9 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_config_rmap_im
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN);
}
return NB_OK;
@@ -15150,9 +15213,9 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_config_rmap_im
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN);
}
return NB_OK;
@@ -15169,9 +15232,9 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_config_rmap_ex
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT);
}
return NB_OK;
@@ -15184,14 +15247,72 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_config_rmap_ex
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT);
}
return NB_OK;
}
+static int bgp_neighbor_afi_safi_plist_modify(struct nb_cb_modify_args *args,
+ int direct)
+{
+ struct bgp *bgp;
+ const char *peer_str;
+ struct peer *peer;
+ const struct lyd_node *nbr_dnode;
+ const struct lyd_node *nbr_af_dnode;
+ const char *af_name;
+ afi_t afi;
+ safi_t safi;
+ const char *name_str;
+
+ nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi");
+ af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name");
+ yang_afi_safi_identity2value(af_name, &afi, &safi);
+
+ nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "neighbor");
+ bgp = nb_running_get_entry(nbr_dnode, NULL, true);
+ peer_str = yang_dnode_get_string(nbr_dnode, "./remote-address");
+ peer = bgp_neighbor_peer_lookup(bgp, peer_str, args->errmsg,
+ args->errmsg_len);
+
+ name_str = yang_dnode_get_string(args->dnode, NULL);
+ if (peer_prefix_list_set(peer, afi, safi, direct, name_str) < 0)
+ return NB_ERR_INCONSISTENCY;
+
+ return NB_OK;
+}
+
+static int bgp_neighbor_afi_safi_plist_destroy(struct nb_cb_destroy_args *args,
+ int direct)
+{
+ struct bgp *bgp;
+ const char *peer_str;
+ struct peer *peer;
+ const struct lyd_node *nbr_dnode;
+ const struct lyd_node *nbr_af_dnode;
+ const char *af_name;
+ afi_t afi;
+ safi_t safi;
+
+ nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi");
+ af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name");
+ yang_afi_safi_identity2value(af_name, &afi, &safi);
+
+ nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "neighbor");
+ bgp = nb_running_get_entry(nbr_dnode, NULL, true);
+ peer_str = yang_dnode_get_string(nbr_dnode, "./remote-address");
+ peer = bgp_neighbor_peer_lookup(bgp, peer_str, args->errmsg,
+ args->errmsg_len);
+
+ if (peer_prefix_list_unset(peer, afi, safi, direct) < 0)
+ return NB_ERR_INCONSISTENCY;
+
+ return NB_OK;
+}
+
/*
* XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-unicast/filter-config/plist-import
@@ -15203,9 +15324,9 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_config_plist_i
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN);
}
return NB_OK;
@@ -15218,9 +15339,9 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_config_plist_i
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN);
}
return NB_OK;
@@ -15237,9 +15358,9 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_config_plist_e
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT);
}
return NB_OK;
@@ -15252,9 +15373,9 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_config_plist_e
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT);
}
return NB_OK;
@@ -16443,6 +16564,347 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_weight_weight_attribu
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/add-paths/path-type
*/
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify(
@@ -17401,6 +17863,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_weight_weight_attri
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/add-paths/path-type
*/
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify(
@@ -18359,6 +19161,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_weight_weight_attri
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/add-paths/path-type
*/
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify(
@@ -19317,6 +20459,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_weight_weight
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/add-paths/path-type
*/
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify(
@@ -20275,6 +21757,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_weight_weight
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/rmap-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/rmap-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/plist-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_modify(
+	struct nb_cb_modify_args *args)
+{
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN);
+	}
+
+	return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_destroy(
+	struct nb_cb_destroy_args *args)
+{
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN);
+	}
+
+	return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/plist-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/access-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/access-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/as-path-filter-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/as-path-filter-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/unsuppress-map-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/unsuppress-map-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/add-paths/path-type
*/
int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_add_paths_path_type_modify(
@@ -21078,6 +22900,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_weight_weight_a
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/rmap-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/rmap-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/plist-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_modify(
+	struct nb_cb_modify_args *args)
+{
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN);
+	}
+
+	return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_destroy(
+	struct nb_cb_destroy_args *args)
+{
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN);
+	}
+
+	return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/plist-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/access-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/access-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/as-path-filter-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/as-path-filter-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/unsuppress-map-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/unsuppress-map-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/add-paths/path-type
*/
int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_add_paths_path_type_modify(
@@ -21881,6 +24043,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_weight_weight_a
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/rmap-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/rmap-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/plist-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/plist-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_modify(
+	struct nb_cb_modify_args *args)
+{
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT);
+	}
+
+	return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/access-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/access-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/as-path-filter-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/as-path-filter-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/unsuppress-map-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/unsuppress-map-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/as-path-options/allow-own-as
*/
int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_as_path_options_allow_own_as_modify(
@@ -22156,6 +24658,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_soft_reconfiguration_mo
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/rmap-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/rmap-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_export_destroy(
+	struct nb_cb_destroy_args *args)
+{
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT);
+	}
+
+	return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/plist-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/plist-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/access-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/access-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/as-path-filter-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/as-path-filter-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/unsuppress-map-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/unsuppress-map-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/route-reflector/route-reflector-client
*/
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_route_reflector_route_reflector_client_modify(
@@ -22225,6 +25067,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_soft_reconfiguration
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/route-reflector/route-reflector-client
*/
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify(
@@ -22292,6 +25474,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_soft_reconfiguration
return NB_OK;
}
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-import
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-export
+ */
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
static int
bgp_unnumbered_neighbor_afi_safi_flag_modify(struct nb_cb_modify_args *args,
uint32_t flags, bool set)
@@ -23442,6 +26964,68 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_orf_capabi
return NB_OK;
}
+static int
+bgp_unnumbered_neighbor_afi_safi_rmap_modify(struct nb_cb_modify_args *args,
+ int direct)
+{
+ struct bgp *bgp;
+ const char *peer_str;
+ struct peer *peer;
+ const struct lyd_node *nbr_dnode;
+ const struct lyd_node *nbr_af_dnode;
+ const char *af_name;
+ afi_t afi;
+ safi_t safi;
+ const char *name_str;
+ struct route_map *route_map;
+ int ret;
+
+ nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi");
+ af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name");
+ yang_afi_safi_identity2value(af_name, &afi, &safi);
+
+ nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "unnumbered-neighbor");
+ bgp = nb_running_get_entry(nbr_dnode, NULL, true);
+ peer_str = yang_dnode_get_string(nbr_dnode, "./interface");
+ peer = bgp_unnumbered_neighbor_peer_lookup(bgp, peer_str, args->errmsg,
+ args->errmsg_len);
+
+ name_str = yang_dnode_get_string(args->dnode, NULL);
+ route_map = route_map_lookup_by_name(name_str);
+ ret = peer_route_map_set(peer, afi, safi, direct, name_str, route_map);
+
+ return bgp_nb_errmsg_return(args->errmsg, args->errmsg_len, ret);
+}
+
+static int
+bgp_unnumbered_neighbor_afi_safi_rmap_destroy(struct nb_cb_destroy_args *args,
+ int direct)
+{
+ struct bgp *bgp;
+ const char *peer_str;
+ struct peer *peer;
+ const struct lyd_node *nbr_dnode;
+ const struct lyd_node *nbr_af_dnode;
+ const char *af_name;
+ afi_t afi;
+ safi_t safi;
+ int ret;
+
+ nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi");
+ af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name");
+ yang_afi_safi_identity2value(af_name, &afi, &safi);
+
+ nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "unnumbered-neighbor");
+ bgp = nb_running_get_entry(nbr_dnode, NULL, true);
+ peer_str = yang_dnode_get_string(nbr_dnode, "./interface");
+ peer = bgp_unnumbered_neighbor_peer_lookup(bgp, peer_str, args->errmsg,
+ args->errmsg_len);
+
+ ret = peer_route_map_unset(peer, afi, safi, direct);
+
+ return bgp_nb_errmsg_return(args->errmsg, args->errmsg_len, ret);
+}
+
/*
* XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-unicast/filter-config/rmap-import
@@ -23453,9 +27037,10 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_con
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args,
+ RMAP_IN);
}
return NB_OK;
@@ -23468,9 +27053,10 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_con
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args,
+ RMAP_IN);
}
return NB_OK;
@@ -23487,9 +27073,10 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_con
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args,
+ RMAP_OUT);
}
return NB_OK;
@@ -23502,14 +27089,74 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_con
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args,
+ RMAP_OUT);
}
return NB_OK;
}
+static int
+bgp_unnumbered_neighbor_afi_safi_plist_modify(struct nb_cb_modify_args *args,
+ int direct)
+{
+ struct bgp *bgp;
+ const char *peer_str;
+ struct peer *peer;
+ const struct lyd_node *nbr_dnode;
+ const struct lyd_node *nbr_af_dnode;
+ const char *af_name;
+ afi_t afi;
+ safi_t safi;
+ const char *name_str;
+
+ nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi");
+ af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name");
+ yang_afi_safi_identity2value(af_name, &afi, &safi);
+
+ nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "unnumbered-neighbor");
+ bgp = nb_running_get_entry(nbr_dnode, NULL, true);
+ peer_str = yang_dnode_get_string(nbr_dnode, "./interface");
+ peer = bgp_unnumbered_neighbor_peer_lookup(bgp, peer_str, args->errmsg,
+ args->errmsg_len);
+
+ name_str = yang_dnode_get_string(args->dnode, NULL);
+ if (peer_prefix_list_set(peer, afi, safi, direct, name_str) < 0)
+ return NB_ERR_INCONSISTENCY;
+
+ return NB_OK;
+}
+
+static int
+bgp_unnumbered_neighbor_afi_safi_plist_destroy(struct nb_cb_destroy_args *args,
+ int direct)
+{
+ struct bgp *bgp;
+ const char *peer_str;
+ struct peer *peer;
+ const struct lyd_node *nbr_dnode;
+ const struct lyd_node *nbr_af_dnode;
+ const char *af_name;
+ afi_t afi;
+ safi_t safi;
+
+ nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi");
+ af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name");
+ yang_afi_safi_identity2value(af_name, &afi, &safi);
+
+ nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "unnumbered-neighbor");
+ bgp = nb_running_get_entry(nbr_dnode, NULL, true);
+ peer_str = yang_dnode_get_string(nbr_dnode, "./interface");
+ peer = bgp_unnumbered_neighbor_peer_lookup(bgp, peer_str, args->errmsg,
+ args->errmsg_len);
+
+ if (peer_prefix_list_unset(peer, afi, safi, direct) < 0)
+ return NB_ERR_INCONSISTENCY;
+
+ return NB_OK;
+}
/*
* XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-unicast/filter-config/plist-import
@@ -23521,9 +27168,10 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_con
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_modify(args,
+ FILTER_IN);
}
return NB_OK;
@@ -23536,9 +27184,10 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_con
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_destroy(
+ args, FILTER_IN);
}
return NB_OK;
@@ -23555,9 +27204,10 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_con
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_modify(
+ args, FILTER_OUT);
}
return NB_OK;
@@ -23570,9 +27220,10 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_con
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_destroy(
+ args, FILTER_OUT);
}
return NB_OK;
@@ -24762,6 +28413,354 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_weight_wei
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args,
+ RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args,
+ RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args,
+ RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args,
+ RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_modify(args,
+ FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_destroy(
+ args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_modify(
+ args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_destroy(
+ args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/add-paths/path-type
*/
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify(
@@ -25721,6 +29720,354 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_weight_w
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args,
+ RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args,
+ RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args,
+ RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args,
+ RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_modify(args,
+ FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_destroy(
+ args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_modify(
+ args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_destroy(
+ args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/add-paths/path-type
*/
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify(
@@ -26680,6 +31027,355 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_weight_w
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args,
+ RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args,
+ RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args,
+ RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args,
+ RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_modify(args,
+ FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_destroy(
+ args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_modify(
+ args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_destroy(
+ args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/add-paths/path-type
*/
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify(
@@ -27639,6 +32335,354 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_we
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args,
+ RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args,
+ RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args,
+ RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args,
+ RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_modify(args,
+ FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_destroy(
+ args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_modify(
+ args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_destroy(
+ args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/add-paths/path-type
*/
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify(
@@ -30550,6 +35594,354 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_soft_reco
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args,
+ RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args,
+ RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args,
+ RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args,
+ RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_modify(args,
+ FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_destroy(
+ args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_modify(
+ args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_destroy(
+ args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/route-reflector/route-reflector-client
*/
int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify(
@@ -30617,6 +36009,354 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_soft_reco
return NB_OK;
}
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args,
+ RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args,
+ RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args,
+ RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args,
+ RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_modify(args,
+ FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_destroy(
+ args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_modify(
+ args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_unnumbered_neighbor_afi_safi_plist_destroy(
+ args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-import
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-export
+ */
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
static int bgp_peer_group_afi_safi_flag_modify(struct nb_cb_modify_args *args,
uint32_t flags, bool set)
{
@@ -31755,6 +37495,62 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_orf_capability_or
return NB_OK;
}
+static int bgp_peer_group_afi_safi_rmap_modify(struct nb_cb_modify_args *args,
+ int direct)
+{
+ struct bgp *bgp;
+ const char *peer_str;
+ struct peer *peer;
+ const struct lyd_node *nbr_dnode;
+ const struct lyd_node *nbr_af_dnode;
+ const char *af_name;
+ afi_t afi;
+ safi_t safi;
+ const char *name_str;
+ struct route_map *route_map;
+ int ret;
+
+ nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi");
+ af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name");
+ yang_afi_safi_identity2value(af_name, &afi, &safi);
+ nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "peer-group");
+ bgp = nb_running_get_entry(nbr_dnode, NULL, true);
+ peer_str = yang_dnode_get_string(nbr_dnode, "./peer-group-name");
+ peer = bgp_peer_group_peer_lookup(bgp, peer_str);
+
+ name_str = yang_dnode_get_string(args->dnode, NULL);
+ route_map = route_map_lookup_by_name(name_str);
+ ret = peer_route_map_set(peer, afi, safi, direct, name_str, route_map);
+
+ return bgp_nb_errmsg_return(args->errmsg, args->errmsg_len, ret);
+}
+
+static int bgp_peer_group_afi_safi_rmap_destroy(struct nb_cb_destroy_args *args,
+ int direct)
+{
+ struct bgp *bgp;
+ const char *peer_str;
+ struct peer *peer;
+ const struct lyd_node *nbr_dnode;
+ const struct lyd_node *nbr_af_dnode;
+ const char *af_name;
+ afi_t afi;
+ safi_t safi;
+ int ret;
+
+ nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi");
+ af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name");
+ yang_afi_safi_identity2value(af_name, &afi, &safi);
+ nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "peer-group");
+ bgp = nb_running_get_entry(nbr_dnode, NULL, true);
+ peer_str = yang_dnode_get_string(nbr_dnode, "./peer-group-name");
+ peer = bgp_peer_group_peer_lookup(bgp, peer_str);
+
+ ret = peer_route_map_unset(peer, afi, safi, direct);
+
+ return bgp_nb_errmsg_return(args->errmsg, args->errmsg_len, ret);
+}
+
/*
* XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-unicast/filter-config/rmap-import
@@ -31766,9 +37562,9 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_filter_config_rma
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN);
}
return NB_OK;
@@ -31781,9 +37577,9 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_filter_config_rma
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN);
}
return NB_OK;
@@ -31800,9 +37596,9 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_filter_config_rma
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT);
}
return NB_OK;
@@ -31815,14 +37611,69 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_filter_config_rma
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT);
}
return NB_OK;
}
+static int bgp_peer_group_afi_safi_plist_modify(struct nb_cb_modify_args *args,
+ int direct)
+{
+ struct bgp *bgp;
+ const char *peer_str;
+ struct peer *peer;
+ const struct lyd_node *nbr_dnode;
+ const struct lyd_node *nbr_af_dnode;
+ const char *af_name;
+ afi_t afi;
+ safi_t safi;
+ const char *name_str;
+
+ nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi");
+ af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name");
+ yang_afi_safi_identity2value(af_name, &afi, &safi);
+ nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "peer-group");
+ bgp = nb_running_get_entry(nbr_dnode, NULL, true);
+ peer_str = yang_dnode_get_string(nbr_dnode, "./peer-group-name");
+ peer = bgp_peer_group_peer_lookup(bgp, peer_str);
+
+ name_str = yang_dnode_get_string(args->dnode, NULL);
+ if (peer_prefix_list_set(peer, afi, safi, direct, name_str) < 0)
+ return NB_ERR_INCONSISTENCY;
+
+ return NB_OK;
+}
+
+static int
+bgp_peer_group_afi_safi_plist_destroy(struct nb_cb_destroy_args *args,
+ int direct)
+{
+ struct bgp *bgp;
+ const char *peer_str;
+ struct peer *peer;
+ const struct lyd_node *nbr_dnode;
+ const struct lyd_node *nbr_af_dnode;
+ const char *af_name;
+ afi_t afi;
+ safi_t safi;
+
+ nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi");
+ af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name");
+ yang_afi_safi_identity2value(af_name, &afi, &safi);
+ nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "peer-group");
+ bgp = nb_running_get_entry(nbr_dnode, NULL, true);
+ peer_str = yang_dnode_get_string(nbr_dnode, "./peer-group-name");
+ peer = bgp_peer_group_peer_lookup(bgp, peer_str);
+
+ if (peer_prefix_list_unset(peer, afi, safi, direct) < 0)
+ return NB_ERR_INCONSISTENCY;
+
+ return NB_OK;
+}
+
/*
* XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-unicast/filter-config/plist-import
@@ -31834,9 +37685,9 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_filter_config_pli
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN);
}
return NB_OK;
@@ -31849,9 +37700,9 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_filter_config_pli
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN);
}
return NB_OK;
@@ -31868,9 +37719,9 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_filter_config_pli
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT);
}
return NB_OK;
@@ -31883,9 +37734,9 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_filter_config_pli
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT);
}
return NB_OK;
@@ -33074,6 +38925,346 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_weight_weight_att
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/add-paths/path-type
*/
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify(
@@ -34032,6 +40223,346 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_weight_weight_a
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/add-paths/path-type
*/
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify(
@@ -34990,6 +41521,346 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_weight_weight_a
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/add-paths/path-type
*/
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify(
@@ -35948,6 +42819,346 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_weight_we
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/add-paths/path-type
*/
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify(
@@ -36906,6 +44117,346 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_weight_we
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/rmap-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/rmap-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/plist-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/plist-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/access-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/access-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/as-path-filter-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/as-path-filter-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/unsuppress-map-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/unsuppress-map-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/add-paths/path-type
*/
int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_add_paths_path_type_modify(
@@ -37709,6 +45260,346 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_weight_weig
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/rmap-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_modify(
+	struct nb_cb_modify_args *args)
+{
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		/* Nothing to do outside the commit phase. */
+		break;
+	case NB_EV_APPLY:
+		/* Attach the configured route-map in the inbound (RMAP_IN)
+		 * direction for this peer-group/AFI-SAFI. */
+		return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN);
+	}
+
+	return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/rmap-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/plist-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/plist-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/access-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/access-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/as-path-filter-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/as-path-filter-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/unsuppress-map-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/unsuppress-map-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/add-paths/path-type
*/
int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_add_paths_path_type_modify(
@@ -38512,6 +46403,346 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_weight_weig
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/rmap-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_modify(
+	struct nb_cb_modify_args *args)
+{
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		/* Nothing to do outside the commit phase. */
+		break;
+	case NB_EV_APPLY:
+		/* Attach the configured route-map in the inbound (RMAP_IN)
+		 * direction for this peer-group/AFI-SAFI. */
+		return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN);
+	}
+
+	return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/rmap-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/plist-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/plist-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/access-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/access-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/as-path-filter-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/as-path-filter-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/unsuppress-map-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/unsuppress-map-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l2vpn-evpn/as-path-options/allow-own-as
*/
int bgp_peer_groups_peer_group_afi_safis_afi_safi_l2vpn_evpn_as_path_options_allow_own_as_modify(
@@ -38856,6 +47087,346 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_soft_reconfigura
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify(
+	struct nb_cb_modify_args *args)
+{
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		/* Nothing to do outside the commit phase. */
+		break;
+	case NB_EV_APPLY:
+		/* Attach the configured route-map in the inbound (RMAP_IN)
+		 * direction for this peer-group/AFI-SAFI. */
+		return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN);
+	}
+
+	return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/route-reflector/route-reflector-client
*/
int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify(
@@ -38922,3 +47493,343 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_soft_reconfigura
return NB_OK;
}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy(
+	struct nb_cb_destroy_args *args)
+{
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		/* export route-map, not import prefix-list */
+		return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT);
+	}
+
+	return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy(
+	struct nb_cb_destroy_args *args)
+{
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		/* import prefix-list, not export route-map */
+		return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN);
+	}
+
+	return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-import
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-export
+ */
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
+
+int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /* TODO: implement me. */
+ break;
+ }
+
+ return NB_OK;
+}
diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c
index 1a9f59db64..b7f62ec0a1 100644
--- a/bgpd/bgp_nexthop.c
+++ b/bgpd/bgp_nexthop.c
@@ -33,6 +33,7 @@
#include "nexthop.h"
#include "queue.h"
#include "filter.h"
+#include "printfrr.h"
#include "bgpd/bgpd.h"
#include "bgpd/bgp_route.h"
@@ -1020,3 +1021,50 @@ void bgp_scan_finish(struct bgp *bgp)
bgp->connected_table[afi] = NULL;
}
}
+
+char *bgp_nexthop_dump_bnc_flags(struct bgp_nexthop_cache *bnc, char *buf,
+ size_t len)
+{
+ if (bnc->flags == 0) {
+ snprintfrr(buf, len, "None ");
+ return buf;
+ }
+
+ snprintfrr(buf, len, "%s%s%s%s%s%s%s",
+ CHECK_FLAG(bnc->flags, BGP_NEXTHOP_VALID) ? "Valid " : "",
+ CHECK_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED) ? "Reg " : "",
+ CHECK_FLAG(bnc->flags, BGP_NEXTHOP_CONNECTED) ? "Conn " : "",
+ CHECK_FLAG(bnc->flags, BGP_NEXTHOP_PEER_NOTIFIED) ? "Notify "
+ : "",
+ CHECK_FLAG(bnc->flags, BGP_STATIC_ROUTE) ? "Static " : "",
+ CHECK_FLAG(bnc->flags, BGP_STATIC_ROUTE_EXACT_MATCH)
+ ? "Static Exact "
+ : "",
+ CHECK_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID)
+ ? "Label Valid "
+ : "");
+
+ return buf;
+}
+
+char *bgp_nexthop_dump_bnc_change_flags(struct bgp_nexthop_cache *bnc,
+					char *buf, size_t len)
+{
+	/* This function dumps change_flags, so the empty check must
+	 * inspect change_flags (not flags as the flags-dump sibling does).
+	 */
+	if (bnc->change_flags == 0) {
+		snprintfrr(buf, len, "None ");
+		return buf;
+	}
+
+	snprintfrr(buf, len, "%s%s%s",
+		   CHECK_FLAG(bnc->change_flags, BGP_NEXTHOP_CHANGED)
+			   ? "Changed "
+			   : "",
+		   CHECK_FLAG(bnc->change_flags, BGP_NEXTHOP_METRIC_CHANGED)
+			   ? "Metric "
+			   : "",
+		   CHECK_FLAG(bnc->change_flags, BGP_NEXTHOP_CONNECTED_CHANGED)
+			   ? "Connected "
+			   : "");
+
+	return buf;
+}
diff --git a/bgpd/bgp_nexthop.h b/bgpd/bgp_nexthop.h
index c4b913faf4..a223ff4133 100644
--- a/bgpd/bgp_nexthop.h
+++ b/bgpd/bgp_nexthop.h
@@ -100,6 +100,11 @@ struct update_subgroup;
struct bgp_dest;
struct attr;
+#define BNC_FLAG_DUMP_SIZE 180
+extern char *bgp_nexthop_dump_bnc_flags(struct bgp_nexthop_cache *bnc,
+ char *buf, size_t len);
+extern char *bgp_nexthop_dump_bnc_change_flags(struct bgp_nexthop_cache *bnc,
+ char *buf, size_t len);
extern void bgp_connected_add(struct bgp *bgp, struct connected *c);
extern void bgp_connected_delete(struct bgp *bgp, struct connected *c);
extern bool bgp_subgrp_multiaccess_check_v4(struct in_addr nexthop,
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index 29ab3d9c6c..bc5da0ee21 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -121,7 +121,7 @@ void bgp_unlink_nexthop_by_peer(struct peer *peer)
* we need both the bgp_route and bgp_nexthop pointers.
*/
int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
- afi_t afi, struct bgp_path_info *pi,
+ afi_t afi, safi_t safi, struct bgp_path_info *pi,
struct peer *peer, int connected)
{
struct bgp_nexthop_cache_head *tree = NULL;
@@ -257,7 +257,11 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
*/
if (bgp_route->inst_type == BGP_INSTANCE_TYPE_VIEW)
return 1;
- else
+ else if (safi == SAFI_UNICAST && pi
+ && pi->sub_type == BGP_ROUTE_IMPORTED && pi->extra
+ && pi->extra->num_labels) {
+ return bgp_isvalid_labeled_nexthop(bnc);
+ } else
return (bgp_isvalid_nexthop(bnc));
}
@@ -316,12 +320,17 @@ static void bgp_process_nexthop_update(struct bgp_nexthop_cache *bnc,
bnc->change_flags = 0;
/* debug print the input */
- if (BGP_DEBUG(nht, NHT))
+ if (BGP_DEBUG(nht, NHT)) {
+ char bnc_buf[BNC_FLAG_DUMP_SIZE];
+
zlog_debug(
- "%s(%u): Rcvd NH update %pFX(%u) - metric %d/%d #nhops %d/%d flags 0x%x",
+ "%s(%u): Rcvd NH update %pFX(%u) - metric %d/%d #nhops %d/%d flags %s",
bnc->bgp->name_pretty, bnc->bgp->vrf_id, &nhr->prefix,
bnc->srte_color, nhr->metric, bnc->metric,
- nhr->nexthop_num, bnc->nexthop_num, bnc->flags);
+ nhr->nexthop_num, bnc->nexthop_num,
+ bgp_nexthop_dump_bnc_flags(bnc, bnc_buf,
+ sizeof(bnc_buf)));
+ }
if (nhr->metric != bnc->metric)
bnc->change_flags |= BGP_NEXTHOP_METRIC_CHANGED;
@@ -698,11 +707,17 @@ static void evaluate_paths(struct bgp_nexthop_cache *bnc)
if (BGP_DEBUG(nht, NHT)) {
char buf[PREFIX2STR_BUFFER];
+ char bnc_buf[BNC_FLAG_DUMP_SIZE];
+ char chg_buf[BNC_FLAG_DUMP_SIZE];
+
bnc_str(bnc, buf, PREFIX2STR_BUFFER);
zlog_debug(
- "NH update for %s(%u)(%s) - flags 0x%x chgflags 0x%x - evaluate paths",
- buf, bnc->srte_color, bnc->bgp->name_pretty, bnc->flags,
- bnc->change_flags);
+ "NH update for %s(%u)(%s) - flags %s chgflags %s- evaluate paths",
+ buf, bnc->srte_color, bnc->bgp->name_pretty,
+ bgp_nexthop_dump_bnc_flags(bnc, bnc_buf,
+ sizeof(bnc_buf)),
+			bgp_nexthop_dump_bnc_change_flags(bnc, chg_buf,
+							  sizeof(chg_buf)));
}
LIST_FOREACH (path, &(bnc->paths), nh_thread) {
diff --git a/bgpd/bgp_nht.h b/bgpd/bgp_nht.h
index 8451f0689d..f374e8dfa5 100644
--- a/bgpd/bgp_nht.h
+++ b/bgpd/bgp_nht.h
@@ -34,14 +34,15 @@ extern void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id);
* bgp_route - BGP instance of route
* bgp_nexthop - BGP instance of nexthop
* a - afi: AFI_IP or AF_IP6
+ * safi - safi: to check which table nhs are being imported to
* p - path for which the nexthop object is being looked up
* peer - The BGP peer associated with this NHT
* connected - True if NH MUST be a connected route
*/
extern int bgp_find_or_add_nexthop(struct bgp *bgp_route,
struct bgp *bgp_nexthop, afi_t a,
- struct bgp_path_info *p, struct peer *peer,
- int connected);
+ safi_t safi, struct bgp_path_info *p,
+ struct peer *peer, int connected);
/**
* bgp_unlink_nexthop() - Unlink the nexthop object from the path structure.
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index b7ecd8a49b..c2e2de1c73 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -1863,6 +1863,7 @@ static int bgp_notify_receive(struct peer *peer, bgp_size_t size)
bgp_notify.subcode = stream_getc(peer->curr);
bgp_notify.length = size - 2;
bgp_notify.data = NULL;
+ bgp_notify.raw_data = NULL;
/* Preserv notify code and sub code. */
peer->notify.code = bgp_notify.code;
diff --git a/bgpd/bgp_pbr.c b/bgpd/bgp_pbr.c
index a3f1eb8401..171522f8ae 100644
--- a/bgpd/bgp_pbr.c
+++ b/bgpd/bgp_pbr.c
@@ -754,7 +754,7 @@ int bgp_pbr_build_and_validate_entry(const struct prefix *p,
struct bgp_pbr_entry_main *api)
{
int ret;
- int i, action_count = 0;
+ uint32_t i, action_count = 0;
struct ecommunity *ecom;
struct ecommunity_val *ecom_eval;
struct bgp_pbr_entry_action *api_action;
@@ -2845,19 +2845,6 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
zlog_warn("PBR: Sample action Ignored");
}
}
-#if 0
- if (api->actions[i].u.za.filter
- & TRAFFIC_ACTION_DISTRIBUTE) {
- if (BGP_DEBUG(pbr, PBR)) {
- bgp_pbr_print_policy_route(api);
- zlog_warn("PBR: Distribute action Applies");
- }
- continue_loop = 0;
- /* continue forwarding entry as before
- * no action
- */
- }
-#endif /* XXX to confirm behaviour of traffic action. for now , ignore */
/* terminate action: run other filters
*/
break;
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index c4ab223b7f..f9e655b4e7 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -93,6 +93,10 @@
#include "bgpd/bgp_route_clippy.c"
#endif
+DEFINE_HOOK(bgp_snmp_update_stats,
+ (struct bgp_node *rn, struct bgp_path_info *pi, bool added),
+ (rn, pi, added))
+
/* Extern from bgp_dump.c */
extern const char *bgp_origin_str[];
extern const char *bgp_origin_long_str[];
@@ -402,6 +406,7 @@ void bgp_path_info_add(struct bgp_dest *dest, struct bgp_path_info *pi)
bgp_dest_lock_node(dest);
peer_lock(pi->peer); /* bgp_path_info peer reference */
bgp_dest_set_defer_flag(dest, false);
+ hook_call(bgp_snmp_update_stats, dest, pi, true);
}
/* Do the actual removal of info from RIB, for use by bgp_process
@@ -417,6 +422,7 @@ void bgp_path_info_reap(struct bgp_dest *dest, struct bgp_path_info *pi)
bgp_path_info_mpath_dequeue(pi);
bgp_path_info_unlock(pi);
+ hook_call(bgp_snmp_update_stats, dest, pi, false);
bgp_dest_unlock_node(dest);
}
@@ -1836,7 +1842,8 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
/* If community is not disabled check the no-export and local. */
if (!transparent && bgp_community_filter(peer, piattr)) {
if (bgp_debug_update(NULL, p, subgrp->update_group, 0))
- zlog_debug("%s: community filter check fail", __func__);
+ zlog_debug("%s: community filter check fail for %pFX",
+ __func__, p);
return false;
}
@@ -3505,6 +3512,36 @@ bool bgp_update_martian_nexthop(struct bgp *bgp, afi_t afi, safi_t safi,
return ret;
}
+static void bgp_attr_add_no_export_community(struct attr *attr)
+{
+ struct community *old;
+ struct community *new;
+ struct community *merge;
+ struct community *no_export;
+
+ old = attr->community;
+ no_export = community_str2com("no-export");
+
+ assert(no_export);
+
+ if (old) {
+ merge = community_merge(community_dup(old), no_export);
+
+ if (!old->refcnt)
+ community_free(&old);
+
+ new = community_uniq_sort(merge);
+ community_free(&merge);
+ } else {
+ new = community_dup(no_export);
+ }
+
+ community_free(&no_export);
+
+ attr->community = new;
+ attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES);
+}
+
int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
struct attr *attr, afi_t afi, safi_t safi, int type,
int sub_type, struct prefix_rd *prd, mpls_label_t *label,
@@ -3697,6 +3734,20 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
if (peer->sort == BGP_PEER_EBGP) {
+ /* rfc7999:
+ * A BGP speaker receiving an announcement tagged with the
+ * BLACKHOLE community SHOULD add the NO_ADVERTISE or
+ * NO_EXPORT community as defined in RFC1997, or a
+ * similar community, to prevent propagation of the
+ * prefix outside the local AS. The community to prevent
+ * propagation SHOULD be chosen according to the operator's
+ * routing policy.
+ */
+ if (new_attr.community
+ && community_include(new_attr.community,
+ COMMUNITY_BLACKHOLE))
+ bgp_attr_add_no_export_community(&new_attr);
+
/* If we receive the graceful-shutdown community from an eBGP
* peer we must lower local-preference */
if (new_attr.community
@@ -3754,7 +3805,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
/* Same attribute comes in. */
if (!CHECK_FLAG(pi->flags, BGP_PATH_REMOVED)
- && attrhash_cmp(pi->attr, attr_new)
+ && same_attr
&& (!has_valid_label
|| memcmp(&(bgp_path_info_extra_get(pi))->label, label,
num_labels * sizeof(mpls_label_t))
@@ -4017,7 +4068,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
nh_afi = BGP_ATTR_NH_AFI(afi, pi->attr);
if (bgp_find_or_add_nexthop(bgp, bgp_nexthop, nh_afi,
- pi, NULL, connected)
+ safi, pi, NULL, connected)
|| CHECK_FLAG(peer->flags, PEER_FLAG_IS_RFAPI_HD))
bgp_path_info_set_flag(dest, pi,
BGP_PATH_VALID);
@@ -4162,7 +4213,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
nh_afi = BGP_ATTR_NH_AFI(afi, new->attr);
- if (bgp_find_or_add_nexthop(bgp, bgp, nh_afi, new, NULL,
+ if (bgp_find_or_add_nexthop(bgp, bgp, nh_afi, safi, new, NULL,
connected)
|| CHECK_FLAG(peer->flags, PEER_FLAG_IS_RFAPI_HD))
bgp_path_info_set_flag(dest, new, BGP_PATH_VALID);
@@ -5400,7 +5451,8 @@ void bgp_static_update(struct bgp *bgp, const struct prefix *p,
bgp_nexthop = pi->extra->bgp_orig;
if (bgp_find_or_add_nexthop(bgp, bgp_nexthop,
- afi, pi, NULL, 0))
+ afi, safi, pi, NULL,
+ 0))
bgp_path_info_set_flag(dest, pi,
BGP_PATH_VALID);
else {
@@ -5452,7 +5504,7 @@ void bgp_static_update(struct bgp *bgp, const struct prefix *p,
/* Nexthop reachability check. */
if (CHECK_FLAG(bgp->flags, BGP_FLAG_IMPORT_CHECK)
&& (safi == SAFI_UNICAST || safi == SAFI_LABELED_UNICAST)) {
- if (bgp_find_or_add_nexthop(bgp, bgp, afi, new, NULL, 0))
+ if (bgp_find_or_add_nexthop(bgp, bgp, afi, safi, new, NULL, 0))
bgp_path_info_set_flag(dest, new, BGP_PATH_VALID);
else {
if (BGP_DEBUG(nht, NHT)) {
@@ -6657,6 +6709,9 @@ static void bgp_aggregate_install(
if (!attr) {
bgp_aggregate_delete(bgp, p, afi, safi, aggregate);
+ if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
+ zlog_debug("%s: %pFX null attribute", __func__,
+ p);
return;
}
@@ -7175,6 +7230,13 @@ static void bgp_add_route_to_aggregate(struct bgp *bgp,
struct ecommunity *ecommunity = NULL;
struct lcommunity *lcommunity = NULL;
+ /* If the bgp instance is being deleted or self peer is deleted
+ * then do not create aggregate route
+ */
+ if (CHECK_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS)
+ || (bgp->peer_self == NULL))
+ return;
+
/* ORIGIN attribute: If at least one route among routes that are
* aggregated has ORIGIN with the value INCOMPLETE, then the
* aggregated route must have the ORIGIN attribute with the value
@@ -7291,6 +7353,13 @@ static void bgp_remove_route_from_aggregate(struct bgp *bgp, afi_t afi,
struct lcommunity *lcommunity = NULL;
unsigned long match = 0;
+ /* If the bgp instance is being deleted or self peer is deleted
+ * then do not create aggregate route
+ */
+ if (CHECK_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS)
+ || (bgp->peer_self == NULL))
+ return;
+
if (BGP_PATH_HOLDDOWN(pi))
return;
@@ -7487,6 +7556,13 @@ int bgp_aggregate_unset(struct bgp *bgp, struct prefix *prefix, afi_t afi,
struct bgp_dest *dest;
struct bgp_aggregate *aggregate;
+ /* If the bgp instance is being deleted or self peer is deleted
+ * then do not create aggregate route
+ */
+ if (CHECK_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS)
+ || (bgp->peer_self == NULL))
+ return 0;
+
apply_mask(prefix);
/* Old configuration check. */
dest = bgp_node_lookup(bgp->aggregate[afi][safi], prefix);
@@ -8196,7 +8272,7 @@ bgp_path_selection_reason2str(enum bgp_path_selection_reason reason)
case bgp_path_selection_router_id:
return "Router ID";
case bgp_path_selection_cluster_length:
- return "Cluser length";
+ return "Cluster length";
case bgp_path_selection_stale:
return "Path Staleness";
case bgp_path_selection_local_configured:
@@ -9589,22 +9665,9 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp,
inet_ntop(AF_INET,
&attr->aggregator_addr,
buf, sizeof(buf)));
- if (attr->aggregator_as == BGP_AS_ZERO)
- json_object_boolean_true_add(
- json_path, "aggregatorAsMalformed");
- else
- json_object_boolean_false_add(
- json_path, "aggregatorAsMalformed");
} else {
- if (attr->aggregator_as == BGP_AS_ZERO)
- vty_out(vty,
- ", (aggregated by %u(malformed) %pI4)",
- attr->aggregator_as,
- &attr->aggregator_addr);
- else
- vty_out(vty, ", (aggregated by %u %pI4)",
- attr->aggregator_as,
- &attr->aggregator_addr);
+ vty_out(vty, ", (aggregated by %u %pI4)",
+ attr->aggregator_as, &attr->aggregator_addr);
}
}
@@ -11262,8 +11325,13 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp,
vty,
use_json,
json_paths);
- if (use_json && display)
- json_object_object_add(json, "paths", json_paths);
+ if (use_json) {
+ if (display)
+ json_object_object_add(json, "paths",
+ json_paths);
+ else
+ json_object_free(json_paths);
+ }
} else {
if ((dest = bgp_node_match(rib, &match)) != NULL) {
const struct prefix *dest_p = bgp_dest_get_prefix(dest);
@@ -12239,22 +12307,6 @@ struct bgp_table_stats {
double total_space;
};
-#if 0
-#define TALLY_SIGFIG 100000
-static unsigned long
-ravg_tally (unsigned long count, unsigned long oldavg, unsigned long newval)
-{
- unsigned long newtot = (count-1) * oldavg + (newval * TALLY_SIGFIG);
- unsigned long res = (newtot * TALLY_SIGFIG) / count;
- unsigned long ret = newtot / count;
-
- if ((res % TALLY_SIGFIG) > (TALLY_SIGFIG/2))
- return ret + 1;
- else
- return ret;
-}
-#endif
-
static void bgp_table_stats_rn(struct bgp_dest *dest, struct bgp_dest *top,
struct bgp_table_stats *ts, unsigned int space)
{
@@ -12269,13 +12321,6 @@ static void bgp_table_stats_rn(struct bgp_dest *dest, struct bgp_dest *top,
ts->counts[BGP_STATS_PREFIXES]++;
ts->counts[BGP_STATS_TOTPLEN] += rn_p->prefixlen;
-#if 0
- ts->counts[BGP_STATS_AVGPLEN]
- = ravg_tally (ts->counts[BGP_STATS_PREFIXES],
- ts->counts[BGP_STATS_AVGPLEN],
- rn_p->prefixlen);
-#endif
-
/* check if the prefix is included by any other announcements */
while (pdest && !bgp_dest_has_bgp_path_info_data(pdest))
pdest = bgp_dest_parent_nolock(pdest);
@@ -12312,16 +12357,6 @@ static void bgp_table_stats_rn(struct bgp_dest *dest, struct bgp_dest *top,
ts->counts[BGP_STATS_ASPATH_TOTHOPS] += hops;
ts->counts[BGP_STATS_ASPATH_TOTSIZE] += size;
-#if 0
- ts->counts[BGP_STATS_ASPATH_AVGHOPS]
- = ravg_tally (ts->counts[BGP_STATS_ASPATH_COUNT],
- ts->counts[BGP_STATS_ASPATH_AVGHOPS],
- hops);
- ts->counts[BGP_STATS_ASPATH_AVGSIZE]
- = ravg_tally (ts->counts[BGP_STATS_ASPATH_COUNT],
- ts->counts[BGP_STATS_ASPATH_AVGSIZE],
- size);
-#endif
if (highest > ts->counts[BGP_STATS_ASN_HIGHEST])
ts->counts[BGP_STATS_ASN_HIGHEST] = highest;
}
@@ -12431,15 +12466,6 @@ static int bgp_table_stats_single(struct vty *vty, struct bgp *bgp, afi_t afi,
continue;
switch (i) {
-#if 0
- case BGP_STATS_ASPATH_AVGHOPS:
- case BGP_STATS_ASPATH_AVGSIZE:
- case BGP_STATS_AVGPLEN:
- vty_out (vty, "%-30s: ", table_stats_strs[i]);
- vty_out (vty, "%12.2f",
- (float)ts.counts[i] / (float)TALLY_SIGFIG);
- break;
-#endif
case BGP_STATS_ASPATH_TOTHOPS:
case BGP_STATS_ASPATH_TOTSIZE:
if (!json) {
@@ -12742,6 +12768,7 @@ static int bgp_peer_counts(struct vty *vty, struct peer *peer, afi_t afi,
"No such neighbor or address family");
vty_out(vty, "%s\n", json_object_to_json_string(json));
json_object_free(json);
+ json_object_free(json_loop);
} else
vty_out(vty, "%% No such neighbor or address family\n");
@@ -12994,51 +13021,29 @@ static void show_adj_route_header(struct vty *vty, struct bgp *bgp,
}
}
-static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi,
- safi_t safi, enum bgp_show_adj_route_type type,
- const char *rmap_name, json_object *json,
- uint8_t show_flags)
+static void
+show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table,
+ afi_t afi, safi_t safi, enum bgp_show_adj_route_type type,
+ const char *rmap_name, json_object *json, json_object *json_ar,
+ json_object *json_scode, json_object *json_ocode,
+ uint8_t show_flags, int *header1, int *header2, char *rd_str,
+ unsigned long *output_count, unsigned long *filtered_count)
{
- struct bgp_table *table;
struct bgp_adj_in *ain;
struct bgp_adj_out *adj;
- unsigned long output_count = 0;
- unsigned long filtered_count = 0;
struct bgp_dest *dest;
- int header1 = 1;
struct bgp *bgp;
- int header2 = 1;
struct attr attr;
int ret;
struct update_subgroup *subgrp;
- json_object *json_scode = NULL;
- json_object *json_ocode = NULL;
- json_object *json_ar = NULL;
struct peer_af *paf;
bool route_filtered;
bool use_json = CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON);
bool wide = CHECK_FLAG(show_flags, BGP_SHOW_OPT_WIDE);
-
- if (use_json) {
- json_scode = json_object_new_object();
- json_ocode = json_object_new_object();
- json_ar = json_object_new_object();
-
- json_object_string_add(json_scode, "suppressed", "s");
- json_object_string_add(json_scode, "damped", "d");
- json_object_string_add(json_scode, "history", "h");
- json_object_string_add(json_scode, "valid", "*");
- json_object_string_add(json_scode, "best", ">");
- json_object_string_add(json_scode, "multipath", "=");
- json_object_string_add(json_scode, "internal", "i");
- json_object_string_add(json_scode, "ribFailure", "r");
- json_object_string_add(json_scode, "stale", "S");
- json_object_string_add(json_scode, "removed", "R");
-
- json_object_string_add(json_ocode, "igp", "i");
- json_object_string_add(json_ocode, "egp", "e");
- json_object_string_add(json_ocode, "incomplete", "?");
- }
+ bool show_rd = ((safi == SAFI_MPLS_VPN) || (safi == SAFI_ENCAP)
+ || (safi == SAFI_EVPN))
+ ? true
+ : false;
bgp = peer->bgp;
@@ -13052,13 +13057,6 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi,
return;
}
- /* labeled-unicast routes live in the unicast table */
- if (safi == SAFI_LABELED_UNICAST)
- table = bgp->rib[afi][SAFI_UNICAST];
- else
- table = bgp->rib[afi][safi];
-
- output_count = filtered_count = 0;
subgrp = peer_subgroup(peer, afi, safi);
if (type == bgp_show_adj_route_advertised && subgrp
@@ -13102,7 +13100,7 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi,
vty_out(vty, "Originating default network %s\n\n",
(afi == AFI_IP) ? "0.0.0.0/0" : "::/0");
}
- header1 = 0;
+ *header1 = 0;
}
for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) {
@@ -13112,9 +13110,23 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi,
if (ain->peer != peer)
continue;
- show_adj_route_header(
- vty, bgp, table, &header1, &header2,
- json, json_scode, json_ocode, wide);
+ show_adj_route_header(vty, bgp, table, header1,
+ header2, json, json_scode,
+ json_ocode, wide);
+
+ if ((safi == SAFI_MPLS_VPN)
+ || (safi == SAFI_ENCAP)
+ || (safi == SAFI_EVPN)) {
+ if (use_json)
+ json_object_string_add(
+ json_ar, "rd", rd_str);
+ else if (show_rd && rd_str) {
+ vty_out(vty,
+ "Route Distinguisher: %s\n",
+ rd_str);
+ show_rd = false;
+ }
+ }
attr = *ain->attr;
route_filtered = false;
@@ -13140,14 +13152,14 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi,
continue;
}
- if (type == bgp_show_adj_route_received &&
- (route_filtered || ret == RMAP_DENY))
- filtered_count++;
+ if (type == bgp_show_adj_route_received
+ && (route_filtered || ret == RMAP_DENY))
+ (*filtered_count)++;
route_vty_out_tmp(vty, rn_p, &attr, safi,
use_json, json_ar, wide);
bgp_attr_undup(&attr, ain->attr);
- output_count++;
+ (*output_count)++;
}
} else if (type == bgp_show_adj_route_advertised) {
RB_FOREACH (adj, bgp_adj_out_rb, &dest->adj_out)
@@ -13155,10 +13167,10 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi,
if (paf->peer != peer || !adj->attr)
continue;
- show_adj_route_header(
- vty, bgp, table, &header1,
- &header2, json, json_scode,
- json_ocode, wide);
+ show_adj_route_header(vty, bgp, table,
+ header1, header2,
+ json, json_scode,
+ json_ocode, wide);
const struct prefix *rn_p =
bgp_dest_get_prefix(dest);
@@ -13169,13 +13181,29 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi,
rmap_name);
if (ret != RMAP_DENY) {
+ if ((safi == SAFI_MPLS_VPN)
+ || (safi == SAFI_ENCAP)
+ || (safi == SAFI_EVPN)) {
+ if (use_json)
+ json_object_string_add(
+ json_ar,
+ "rd",
+ rd_str);
+ else if (show_rd
+ && rd_str) {
+ vty_out(vty,
+ "Route Distinguisher: %s\n",
+ rd_str);
+ show_rd = false;
+ }
+ }
route_vty_out_tmp(
vty, rn_p, &attr, safi,
use_json, json_ar,
wide);
- output_count++;
+ (*output_count)++;
} else {
- filtered_count++;
+ (*filtered_count)++;
}
bgp_attr_undup(&attr, adj->attr);
@@ -13183,9 +13211,9 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi,
} else if (type == bgp_show_adj_route_bestpath) {
struct bgp_path_info *pi;
- show_adj_route_header(vty, bgp, table, &header1,
- &header2, json, json_scode,
- json_ocode, wide);
+ show_adj_route_header(vty, bgp, table, header1, header2,
+ json, json_scode, json_ocode,
+ wide);
for (pi = bgp_dest_get_bgp_path_info(dest); pi;
pi = pi->next) {
@@ -13199,46 +13227,67 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi,
bgp_dest_get_prefix(dest),
pi->attr, safi, use_json,
json_ar, wide);
- output_count++;
+ (*output_count)++;
}
}
}
-
- if (use_json) {
- json_object_object_add(json, "advertisedRoutes", json_ar);
- json_object_int_add(json, "totalPrefixCounter", output_count);
- json_object_int_add(json, "filteredPrefixCounter",
- filtered_count);
-
- vty_out(vty, "%s\n", json_object_to_json_string_ext(
- json, JSON_C_TO_STRING_PRETTY));
-
- if (!output_count && !filtered_count) {
- json_object_free(json_scode);
- json_object_free(json_ocode);
- }
-
- json_object_free(json);
- } else if (output_count > 0) {
- if (filtered_count > 0)
- vty_out(vty,
- "\nTotal number of prefixes %ld (%ld filtered)\n",
- output_count, filtered_count);
- else
- vty_out(vty, "\nTotal number of prefixes %ld\n",
- output_count);
- }
}
static int peer_adj_routes(struct vty *vty, struct peer *peer, afi_t afi,
safi_t safi, enum bgp_show_adj_route_type type,
const char *rmap_name, uint8_t show_flags)
{
+ struct bgp *bgp;
+ struct bgp_table *table;
json_object *json = NULL;
+ json_object *json_scode = NULL;
+ json_object *json_ocode = NULL;
+ json_object *json_ar = NULL;
bool use_json = CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON);
- if (use_json)
+ /* Init BGP headers here so they're only displayed once
+ * even if 'table' is 2-tier (MPLS_VPN, ENCAP, EVPN).
+ */
+ int header1 = 1;
+ int header2 = 1;
+
+ /*
+ * Initialize variables for each RD
+ * All prefixes under an RD is aggregated within "json_routes"
+ */
+ char rd_str[BUFSIZ] = {0};
+ json_object *json_routes = NULL;
+
+
+ /* For 2-tier tables, prefix counts need to be
+ * maintained across multiple runs of show_adj_route()
+ */
+ unsigned long output_count_per_rd;
+ unsigned long filtered_count_per_rd;
+ unsigned long output_count = 0;
+ unsigned long filtered_count = 0;
+
+ if (use_json) {
json = json_object_new_object();
+ json_ar = json_object_new_object();
+ json_scode = json_object_new_object();
+ json_ocode = json_object_new_object();
+
+ json_object_string_add(json_scode, "suppressed", "s");
+ json_object_string_add(json_scode, "damped", "d");
+ json_object_string_add(json_scode, "history", "h");
+ json_object_string_add(json_scode, "valid", "*");
+ json_object_string_add(json_scode, "best", ">");
+ json_object_string_add(json_scode, "multipath", "=");
+ json_object_string_add(json_scode, "internal", "i");
+ json_object_string_add(json_scode, "ribFailure", "r");
+ json_object_string_add(json_scode, "stale", "S");
+ json_object_string_add(json_scode, "removed", "R");
+
+ json_object_string_add(json_ocode, "igp", "i");
+ json_object_string_add(json_ocode, "egp", "e");
+ json_object_string_add(json_ocode, "incomplete", "?");
+ }
if (!peer || !peer->afc[afi][safi]) {
if (use_json) {
@@ -13270,7 +13319,84 @@ static int peer_adj_routes(struct vty *vty, struct peer *peer, afi_t afi,
return CMD_WARNING;
}
- show_adj_route(vty, peer, afi, safi, type, rmap_name, json, show_flags);
+ bgp = peer->bgp;
+
+ /* labeled-unicast routes live in the unicast table */
+ if (safi == SAFI_LABELED_UNICAST)
+ table = bgp->rib[afi][SAFI_UNICAST];
+ else
+ table = bgp->rib[afi][safi];
+
+ if ((safi == SAFI_MPLS_VPN) || (safi == SAFI_ENCAP)
+ || (safi == SAFI_EVPN)) {
+
+ struct bgp_dest *dest;
+
+ for (dest = bgp_table_top(table); dest;
+ dest = bgp_route_next(dest)) {
+ table = bgp_dest_get_bgp_table_info(dest);
+ if (!table)
+ continue;
+
+ output_count_per_rd = 0;
+ filtered_count_per_rd = 0;
+
+ if (use_json)
+ json_routes = json_object_new_object();
+
+ const struct prefix_rd *prd;
+ prd = (const struct prefix_rd *)bgp_dest_get_prefix(
+ dest);
+
+ prefix_rd2str(prd, rd_str, sizeof(rd_str));
+
+ show_adj_route(vty, peer, table, afi, safi, type,
+ rmap_name, json, json_routes, json_scode,
+ json_ocode, show_flags, &header1,
+ &header2, rd_str, &output_count_per_rd,
+ &filtered_count_per_rd);
+
+ /* Don't include an empty RD in the output! */
+ if (json_routes && (output_count_per_rd > 0))
+ json_object_object_add(json_ar, rd_str,
+ json_routes);
+
+ output_count += output_count_per_rd;
+ filtered_count += filtered_count_per_rd;
+ }
+ } else
+ show_adj_route(vty, peer, table, afi, safi, type, rmap_name,
+ json, json_ar, json_scode, json_ocode,
+ show_flags, &header1, &header2, rd_str,
+ &output_count, &filtered_count);
+
+ if (use_json) {
+ json_object_object_add(json, "advertisedRoutes", json_ar);
+ json_object_int_add(json, "totalPrefixCounter", output_count);
+ json_object_int_add(json, "filteredPrefixCounter",
+ filtered_count);
+
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+
+ if (!output_count && !filtered_count) {
+ json_object_free(json_scode);
+ json_object_free(json_ocode);
+ }
+
+ if (json)
+ json_object_free(json);
+
+ } else if (output_count > 0) {
+ if (filtered_count > 0)
+ vty_out(vty,
+ "\nTotal number of prefixes %ld (%ld filtered)\n",
+ output_count, filtered_count);
+ else
+ vty_out(vty, "\nTotal number of prefixes %ld\n",
+ output_count);
+ }
return CMD_SUCCESS;
}
diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h
index bdbf4743ab..1060d2e60d 100644
--- a/bgpd/bgp_route.h
+++ b/bgpd/bgp_route.h
@@ -558,6 +558,8 @@ DECLARE_HOOK(bgp_process,
#define BGP_SHOW_OPT_AFI_ALL (1 << 2)
#define BGP_SHOW_OPT_AFI_IP (1 << 3)
#define BGP_SHOW_OPT_AFI_IP6 (1 << 4)
+#define BGP_SHOW_OPT_ESTABLISHED (1 << 5)
+#define BGP_SHOW_OPT_FAILED (1 << 6)
/* Prototypes. */
extern void bgp_rib_remove(struct bgp_dest *dest, struct bgp_path_info *pi,
diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c
index 0f4f26e3ee..3dc2cfbd5c 100644
--- a/bgpd/bgp_routemap.c
+++ b/bgpd/bgp_routemap.c
@@ -65,6 +65,7 @@
#include "bgpd/bgp_flowspec_util.h"
#include "bgpd/bgp_encap_types.h"
#include "bgpd/bgp_mpath.h"
+#include "bgpd/bgp_script.h"
#ifdef ENABLE_BGP_VNC
#include "bgpd/rfapi/bgp_rfapi_cfg.h"
@@ -337,99 +338,138 @@ static const struct route_map_rule_cmd route_match_peer_cmd = {
route_match_peer_free
};
-#if defined(HAVE_LUA)
-static enum route_map_cmd_result_t
-route_match_command(void *rule, const struct prefix *prefix, void *object)
-{
- int status = RMAP_NOMATCH;
- u_int32_t locpref = 0;
- u_int32_t newlocpref = 0;
- enum lua_rm_status lrm_status;
- struct bgp_path_info *path = (struct bgp_path_info *)object;
- lua_State *L = lua_initialize("/etc/frr/lua.scr");
-
- if (L == NULL)
- return status;
+#ifdef HAVE_SCRIPTING
+enum frrlua_rm_status {
/*
- * Setup the prefix information to pass in
+ * Script function run failure. This will translate into a deny
*/
- lua_setup_prefix_table(L, prefix);
-
- zlog_debug("Set up prefix table");
+ LUA_RM_FAILURE = 0,
/*
- * Setup the bgp_path_info information
+ * No Match was found for the route map function
*/
- lua_newtable(L);
- lua_pushinteger(L, path->attr->med);
- lua_setfield(L, -2, "metric");
- lua_pushinteger(L, path->attr->nh_ifindex);
- lua_setfield(L, -2, "ifindex");
- lua_pushstring(L, path->attr->aspath->str);
- lua_setfield(L, -2, "aspath");
- lua_pushinteger(L, path->attr->local_pref);
- lua_setfield(L, -2, "localpref");
- zlog_debug("%s %d", path->attr->aspath->str, path->attr->nh_ifindex);
- lua_setglobal(L, "nexthop");
-
- zlog_debug("Set up nexthop information");
+ LUA_RM_NOMATCH,
/*
- * Run the rule
+ * Match was found but no changes were made to the incoming data.
*/
- lrm_status = lua_run_rm_rule(L, rule);
- switch (lrm_status) {
+ LUA_RM_MATCH,
+ /*
+ * Match was found and data was modified, so figure out what changed
+ */
+ LUA_RM_MATCH_AND_CHANGE,
+};
+
+static enum route_map_cmd_result_t
+route_match_script(void *rule, const struct prefix *prefix, void *object)
+{
+ const char *scriptname = rule;
+ struct bgp_path_info *path = (struct bgp_path_info *)object;
+
+ struct frrscript *fs = frrscript_load(scriptname, NULL);
+
+ if (!fs) {
+ zlog_err("Issue loading script rule; defaulting to no match");
+ return RMAP_NOMATCH;
+ }
+
+ enum frrlua_rm_status status_failure = LUA_RM_FAILURE,
+ status_nomatch = LUA_RM_NOMATCH,
+ status_match = LUA_RM_MATCH,
+ status_match_and_change = LUA_RM_MATCH_AND_CHANGE;
+
+ /* Make result values available */
+ struct frrscript_env env[] = {
+ {"integer", "RM_FAILURE", &status_failure},
+ {"integer", "RM_NOMATCH", &status_nomatch},
+ {"integer", "RM_MATCH", &status_match},
+ {"integer", "RM_MATCH_AND_CHANGE", &status_match_and_change},
+ {"integer", "action", &status_failure},
+ {"prefix", "prefix", prefix},
+ {"attr", "attributes", path->attr},
+ {"peer", "peer", path->peer},
+ {}};
+
+ struct frrscript_env results[] = {
+ {"integer", "action"},
+ {"attr", "attributes"},
+ {},
+ };
+
+ int result = frrscript_call(fs, env);
+
+ if (result) {
+ zlog_err("Issue running script rule; defaulting to no match");
+ return RMAP_NOMATCH;
+ }
+
+ enum frrlua_rm_status *lrm_status =
+ frrscript_get_result(fs, &results[0]);
+
+ int status = RMAP_NOMATCH;
+
+ switch (*lrm_status) {
case LUA_RM_FAILURE:
- zlog_debug("RM_FAILURE");
+ zlog_err(
+ "Executing route-map match script '%s' failed; defaulting to no match",
+ scriptname);
+ status = RMAP_NOMATCH;
break;
case LUA_RM_NOMATCH:
- zlog_debug("RM_NOMATCH");
+ status = RMAP_NOMATCH;
break;
case LUA_RM_MATCH_AND_CHANGE:
- zlog_debug("MATCH AND CHANGE");
- lua_getglobal(L, "nexthop");
- path->attr->med = get_integer(L, "metric");
- /*
- * This needs to be abstraced with the set function
- */
+ status = RMAP_MATCH;
+ zlog_debug("Updating attribute based on script's values");
+
+ uint32_t locpref = 0;
+ struct attr *newattr = frrscript_get_result(fs, &results[1]);
+
+ path->attr->med = newattr->med;
+
if (path->attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF))
locpref = path->attr->local_pref;
- newlocpref = get_integer(L, "localpref");
- if (newlocpref != locpref) {
- path->attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF);
- path->attr->local_pref = newlocpref;
+ if (locpref != newattr->local_pref) {
+ SET_FLAG(path->attr->flag,
+ ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF));
+ path->attr->local_pref = newattr->local_pref;
}
- status = RMAP_MATCH;
+
+ aspath_free(newattr->aspath);
+ XFREE(MTYPE_TMP, newattr);
break;
case LUA_RM_MATCH:
- zlog_debug("MATCH ONLY");
status = RMAP_MATCH;
break;
}
- lua_close(L);
+
+ XFREE(MTYPE_TMP, lrm_status);
+ frrscript_unload(fs);
+
return status;
}
-static void *route_match_command_compile(const char *arg)
+static void *route_match_script_compile(const char *arg)
{
- char *command;
+ char *scriptname;
+
+ scriptname = XSTRDUP(MTYPE_ROUTE_MAP_COMPILED, arg);
- command = XSTRDUP(MTYPE_ROUTE_MAP_COMPILED, arg);
- return command;
+ return scriptname;
}
-static void
-route_match_command_free(void *rule)
+static void route_match_script_free(void *rule)
{
XFREE(MTYPE_ROUTE_MAP_COMPILED, rule);
}
-static const struct route_map_rule_cmd route_match_command_cmd = {
- "command",
- route_match_command,
- route_match_command_compile,
- route_match_command_free
+static const struct route_map_rule_cmd route_match_script_cmd = {
+ "script",
+ route_match_script,
+ route_match_script_compile,
+ route_match_script_free
};
-#endif
+
+#endif /* HAVE_SCRIPTING */
/* `match ip address IP_ACCESS_LIST' */
@@ -3491,8 +3531,9 @@ static void bgp_route_map_process_peer(const char *rmap_name,
PEER_FLAG_SOFT_RECONFIG)) {
if (bgp_debug_update(peer, NULL, NULL, 1))
zlog_debug(
- "Processing route_map %s update on peer %s (inbound, soft-reconfig)",
- rmap_name, peer->host);
+ "Processing route_map %s(%s:%s) update on peer %s (inbound, soft-reconfig)",
+ rmap_name, afi2str(afi),
+ safi2str(safi), peer->host);
bgp_soft_reconfig_in(peer, afi, safi);
} else if (CHECK_FLAG(peer->cap,
@@ -3501,8 +3542,9 @@ static void bgp_route_map_process_peer(const char *rmap_name,
PEER_CAP_REFRESH_NEW_RCV)) {
if (bgp_debug_update(peer, NULL, NULL, 1))
zlog_debug(
- "Processing route_map %s update on peer %s (inbound, route-refresh)",
- rmap_name, peer->host);
+ "Processing route_map %s(%s:%s) update on peer %s (inbound, route-refresh)",
+ rmap_name, afi2str(afi),
+ safi2str(safi), peer->host);
bgp_route_refresh_send(
peer, afi, safi, 0, 0, 0,
BGP_ROUTE_REFRESH_NORMAL);
@@ -3641,8 +3683,9 @@ static void bgp_route_map_process_update(struct bgp *bgp, const char *rmap_name,
if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug(
- "Processing route_map %s update on table map",
- rmap_name);
+ "Processing route_map %s(%s:%s) update on table map",
+ rmap_name, afi2str(afi),
+ safi2str(safi));
if (route_update)
bgp_zebra_announce_table(bgp, afi, safi);
}
@@ -3669,8 +3712,9 @@ static void bgp_route_map_process_update(struct bgp *bgp, const char *rmap_name,
if (bgp_debug_zebra(bn_p))
zlog_debug(
- "Processing route_map %s update on static route %s",
- rmap_name,
+ "Processing route_map %s(%s:%s) update on static route %s",
+ rmap_name, afi2str(afi),
+ safi2str(safi),
inet_ntop(bn_p->family,
&bn_p->u.prefix, buf,
INET6_ADDRSTRLEN));
@@ -3720,8 +3764,9 @@ static void bgp_route_map_process_update(struct bgp *bgp, const char *rmap_name,
if (bgp_debug_zebra(bn_p))
zlog_debug(
- "Processing route_map %s update on aggregate-address route %s",
- rmap_name,
+ "Processing route_map %s(%s:%s) update on aggregate-address route %s",
+ rmap_name, afi2str(afi),
+ safi2str(safi),
inet_ntop(bn_p->family,
&bn_p->u.prefix, buf,
INET6_ADDRSTRLEN));
@@ -3756,8 +3801,9 @@ static void bgp_route_map_process_update(struct bgp *bgp, const char *rmap_name,
if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug(
- "Processing route_map %s update on redistributed routes",
- rmap_name);
+ "Processing route_map %s(%s:%s) update on redistributed routes",
+ rmap_name, afi2str(afi),
+ safi2str(safi));
bgp_redistribute_resend(bgp, afi, i,
red->instance);
@@ -3776,8 +3822,8 @@ static void bgp_route_map_process_update(struct bgp *bgp, const char *rmap_name,
if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug(
- "Processing route_map %s update on advertise type5 route command",
- rmap_name);
+ "Processing route_map %s(%s:%s) update on advertise type5 route command",
+ rmap_name, afi2str(afi), safi2str(safi));
if (route_update && advertise_type5_routes(bgp, afi)) {
bgp_evpn_withdraw_type5_routes(bgp, afi, safi);
@@ -4096,30 +4142,29 @@ DEFUN (no_match_peer,
RMAP_EVENT_MATCH_DELETED);
}
-#if defined(HAVE_LUA)
-DEFUN (match_command,
- match_command_cmd,
- "match command WORD",
- MATCH_STR
- "Run a command to match\n"
- "The command to run\n")
-{
- return bgp_route_match_add(vty, "command", argv[2]->arg,
- RMAP_EVENT_FILTER_ADDED);
-}
-
-DEFUN (no_match_command,
- no_match_command_cmd,
- "no match command WORD",
+#ifdef HAVE_SCRIPTING
+DEFUN (match_script,
+ match_script_cmd,
+ "[no] match script WORD",
NO_STR
MATCH_STR
- "Run a command to match\n"
- "The command to run\n")
+ "Execute script to determine match\n"
+ "The script name to run, without .lua; e.g. 'myroutemap' to run myroutemap.lua\n")
{
- return bgp_route_match_delete(vty, "command", argv[3]->arg,
- RMAP_EVENT_FILTER_DELETED);
+ bool no = strmatch(argv[0]->text, "no");
+ int i = 0;
+ argv_find(argv, argc, "WORD", &i);
+ const char *script = argv[i]->arg;
+
+ if (no) {
+ return bgp_route_match_delete(vty, "script", script,
+ RMAP_EVENT_FILTER_DELETED);
+ } else {
+ return bgp_route_match_add(vty, "script", script,
+ RMAP_EVENT_FILTER_ADDED);
+ }
}
-#endif
+#endif /* HAVE_SCRIPTING */
/* match probability */
DEFUN (match_probability,
@@ -4783,6 +4828,11 @@ DEFUN (set_community,
buffer_putstr(b, "no-export");
continue;
}
+ if (strncmp(argv[i]->arg, "blackhole", strlen(argv[i]->arg))
+ == 0) {
+ buffer_putstr(b, "blackhole");
+ continue;
+ }
if (strncmp(argv[i]->arg, "graceful-shutdown",
strlen(argv[i]->arg))
== 0) {
@@ -5633,8 +5683,8 @@ void bgp_route_map_init(void)
route_map_install_match(&route_match_peer_cmd);
route_map_install_match(&route_match_local_pref_cmd);
-#if defined(HAVE_LUA)
- route_map_install_match(&route_match_command_cmd);
+#ifdef HAVE_SCRIPTING
+ route_map_install_match(&route_match_script_cmd);
#endif
route_map_install_match(&route_match_ip_address_cmd);
route_map_install_match(&route_match_ip_next_hop_cmd);
@@ -5798,9 +5848,8 @@ void bgp_route_map_init(void)
install_element(RMAP_NODE, &no_set_ipv6_nexthop_prefer_global_cmd);
install_element(RMAP_NODE, &set_ipv6_nexthop_peer_cmd);
install_element(RMAP_NODE, &no_set_ipv6_nexthop_peer_cmd);
-#if defined(HAVE_LUA)
- install_element(RMAP_NODE, &match_command_cmd);
- install_element(RMAP_NODE, &no_match_command_cmd);
+#ifdef HAVE_SCRIPTING
+ install_element(RMAP_NODE, &match_script_cmd);
#endif
}
diff --git a/bgpd/bgp_script.c b/bgpd/bgp_script.c
new file mode 100644
index 0000000000..0cda1927f8
--- /dev/null
+++ b/bgpd/bgp_script.c
@@ -0,0 +1,192 @@
+/* BGP scripting foo
+ * Copyright (C) 2020 NVIDIA Corporation
+ * Quentin Young
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#ifdef HAVE_SCRIPTING
+
+#include "bgpd.h"
+#include "bgp_script.h"
+#include "bgp_debug.h"
+#include "bgp_aspath.h"
+#include "frratomic.h"
+#include "frrscript.h"
+#include "frrlua.h"
+
+static void lua_pushpeer(lua_State *L, const struct peer *peer)
+{
+ lua_newtable(L);
+ lua_pushinteger(L, peer->as);
+ lua_setfield(L, -2, "remote_as");
+ lua_pushinteger(L, peer->local_as);
+ lua_setfield(L, -2, "local_as");
+ lua_pushinaddr(L, &peer->remote_id);
+ lua_setfield(L, -2, "remote_id");
+ lua_pushinaddr(L, &peer->local_id);
+ lua_setfield(L, -2, "local_id");
+ lua_pushstring(L, lookup_msg(bgp_status_msg, peer->status, NULL));
+ lua_setfield(L, -2, "state");
+ lua_pushstring(L, peer->desc ? peer->desc : "");
+ lua_setfield(L, -2, "description");
+ lua_pushtimet(L, &peer->uptime);
+ lua_setfield(L, -2, "uptime");
+ lua_pushtimet(L, &peer->readtime);
+ lua_setfield(L, -2, "last_readtime");
+ lua_pushtimet(L, &peer->resettime);
+ lua_setfield(L, -2, "last_resettime");
+ lua_pushsockunion(L, peer->su_local);
+ lua_setfield(L, -2, "local_address");
+ lua_pushsockunion(L, peer->su_remote);
+ lua_setfield(L, -2, "remote_address");
+ lua_pushinteger(L, peer->cap);
+ lua_setfield(L, -2, "capabilities");
+ lua_pushinteger(L, peer->flags);
+ lua_setfield(L, -2, "flags");
+ lua_pushstring(L, peer->password ? peer->password : "");
+ lua_setfield(L, -2, "password");
+
+ /* Nested tables here */
+ lua_newtable(L);
+ {
+ lua_newtable(L);
+ {
+ lua_pushinteger(L, peer->holdtime);
+ lua_setfield(L, -2, "hold");
+ lua_pushinteger(L, peer->keepalive);
+ lua_setfield(L, -2, "keepalive");
+ lua_pushinteger(L, peer->connect);
+ lua_setfield(L, -2, "connect");
+ lua_pushinteger(L, peer->routeadv);
+ lua_setfield(L, -2, "route_advertisement");
+ }
+ lua_setfield(L, -2, "configured");
+
+ lua_newtable(L);
+ {
+ lua_pushinteger(L, peer->v_holdtime);
+ lua_setfield(L, -2, "hold");
+ lua_pushinteger(L, peer->v_keepalive);
+ lua_setfield(L, -2, "keepalive");
+ lua_pushinteger(L, peer->v_connect);
+ lua_setfield(L, -2, "connect");
+ lua_pushinteger(L, peer->v_routeadv);
+ lua_setfield(L, -2, "route_advertisement");
+ }
+ lua_setfield(L, -2, "negotiated");
+ }
+ lua_setfield(L, -2, "timers");
+
+ lua_newtable(L);
+ {
+ lua_pushinteger(L, atomic_load_explicit(&peer->open_in,
+ memory_order_relaxed));
+ lua_setfield(L, -2, "open_in");
+ lua_pushinteger(L, atomic_load_explicit(&peer->open_out,
+ memory_order_relaxed));
+ lua_setfield(L, -2, "open_out");
+ lua_pushinteger(L, atomic_load_explicit(&peer->update_in,
+ memory_order_relaxed));
+ lua_setfield(L, -2, "update_in");
+ lua_pushinteger(L, atomic_load_explicit(&peer->update_out,
+ memory_order_relaxed));
+ lua_setfield(L, -2, "update_out");
+ lua_pushinteger(L, atomic_load_explicit(&peer->update_time,
+ memory_order_relaxed));
+ lua_setfield(L, -2, "update_time");
+ lua_pushinteger(L, atomic_load_explicit(&peer->keepalive_in,
+ memory_order_relaxed));
+ lua_setfield(L, -2, "keepalive_in");
+ lua_pushinteger(L, atomic_load_explicit(&peer->keepalive_out,
+ memory_order_relaxed));
+ lua_setfield(L, -2, "keepalive_out");
+ lua_pushinteger(L, atomic_load_explicit(&peer->notify_in,
+ memory_order_relaxed));
+ lua_setfield(L, -2, "notify_in");
+ lua_pushinteger(L, atomic_load_explicit(&peer->notify_out,
+ memory_order_relaxed));
+ lua_setfield(L, -2, "notify_out");
+ lua_pushinteger(L, atomic_load_explicit(&peer->refresh_in,
+ memory_order_relaxed));
+ lua_setfield(L, -2, "refresh_in");
+ lua_pushinteger(L, atomic_load_explicit(&peer->refresh_out,
+ memory_order_relaxed));
+ lua_setfield(L, -2, "refresh_out");
+ lua_pushinteger(L, atomic_load_explicit(&peer->dynamic_cap_in,
+ memory_order_relaxed));
+ lua_setfield(L, -2, "dynamic_cap_in");
+ lua_pushinteger(L, atomic_load_explicit(&peer->dynamic_cap_out,
+ memory_order_relaxed));
+ lua_setfield(L, -2, "dynamic_cap_out");
+ lua_pushinteger(L, peer->established);
+ lua_setfield(L, -2, "times_established");
+ lua_pushinteger(L, peer->dropped);
+ lua_setfield(L, -2, "times_dropped");
+ }
+ lua_setfield(L, -2, "stats");
+}
+
+static void lua_pushattr(lua_State *L, const struct attr *attr)
+{
+ lua_newtable(L);
+ lua_pushinteger(L, attr->med);
+ lua_setfield(L, -2, "metric");
+ lua_pushinteger(L, attr->nh_ifindex);
+ lua_setfield(L, -2, "ifindex");
+ lua_pushstring(L, attr->aspath->str);
+ lua_setfield(L, -2, "aspath");
+ lua_pushinteger(L, attr->local_pref);
+ lua_setfield(L, -2, "localpref");
+}
+
+static void *lua_toattr(lua_State *L, int idx)
+{
+ struct attr *attr = XCALLOC(MTYPE_TMP, sizeof(struct attr));
+
+ lua_getfield(L, -1, "metric");
+ attr->med = lua_tointeger(L, -1);
+ lua_pop(L, 1);
+ lua_getfield(L, -1, "ifindex");
+ attr->nh_ifindex = lua_tointeger(L, -1);
+ lua_pop(L, 1);
+ lua_getfield(L, -1, "aspath");
+ attr->aspath = aspath_str2aspath(lua_tostring(L, -1));
+ lua_pop(L, 1);
+ lua_getfield(L, -1, "localpref");
+ attr->local_pref = lua_tointeger(L, -1);
+ lua_pop(L, 1);
+
+ return attr;
+}
+
+struct frrscript_codec frrscript_codecs_bgpd[] = {
+ {.typename = "peer",
+ .encoder = (encoder_func)lua_pushpeer,
+ .decoder = NULL},
+ {.typename = "attr",
+ .encoder = (encoder_func)lua_pushattr,
+ .decoder = lua_toattr},
+ {}};
+
+void bgp_script_init(void)
+{
+ frrscript_register_type_codecs(frrscript_codecs_bgpd);
+}
+
+#endif /* HAVE_SCRIPTING */
diff --git a/bgpd/bgp_script.h b/bgpd/bgp_script.h
new file mode 100644
index 0000000000..6682c2eebd
--- /dev/null
+++ b/bgpd/bgp_script.h
@@ -0,0 +1,34 @@
+/* BGP scripting foo
+ * Copyright (C) 2020 NVIDIA Corporation
+ * Quentin Young
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+#ifndef __BGP_SCRIPT__
+#define __BGP_SCRIPT__
+
+#include <zebra.h>
+
+#ifdef HAVE_SCRIPTING
+
+/*
+ * Initialize scripting stuff.
+ */
+void bgp_script_init(void);
+
+#endif /* HAVE_SCRIPTING */
+
+#endif /* __BGP_SCRIPT__ */
diff --git a/bgpd/bgp_snmp.c b/bgpd/bgp_snmp.c
index 303f4ca56e..71868abc5f 100644
--- a/bgpd/bgp_snmp.c
+++ b/bgpd/bgp_snmp.c
@@ -40,6 +40,7 @@
#include "bgpd/bgp_attr.h"
#include "bgpd/bgp_route.h"
#include "bgpd/bgp_fsm.h"
+#include "bgpd/bgp_mplsvpn_snmp.h"
/* BGP4-MIB described in RFC1657. */
#define BGP4MIB 1,3,6,1,2,1,15
@@ -849,8 +850,9 @@ static uint8_t *bgp4PathAttrTable(struct variable *v, oid name[],
}
/* BGP Traps. */
-static struct trap_object bgpTrapList[] = {{3, {3, 1, BGPPEERLASTERROR}},
- {3, {3, 1, BGPPEERSTATE}}};
+static struct trap_object bgpTrapList[] = {{3, {3, 1, BGPPEERREMOTEADDR} },
+ {3, {3, 1, BGPPEERLASTERROR} },
+ {3, {3, 1, BGPPEERSTATE} } };
static int bgpTrapEstablished(struct peer *peer)
{
@@ -898,6 +900,7 @@ static int bgp_snmp_init(struct thread_master *tm)
{
smux_init(tm);
REGISTER_MIB("mibII/bgp", bgp_variables, variable, bgp_oid);
+ bgp_mpls_l3vpn_module_init();
return 0;
}
diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c
index 059e05ef71..621a14014f 100644
--- a/bgpd/bgp_updgrp.c
+++ b/bgpd/bgp_updgrp.c
@@ -909,7 +909,6 @@ static void update_subgroup_add_peer(struct update_subgroup *subgrp,
bpacket_add_peer(pkt, paf);
- bpacket_queue_sanity_check(SUBGRP_PKTQ(subgrp));
if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
zlog_debug("peer %s added to subgroup s%" PRIu64,
paf->peer->host, subgrp->id);
@@ -1229,8 +1228,6 @@ static int update_subgroup_copy_packets(struct update_subgroup *dest,
pkt = bpacket_next(pkt);
}
- bpacket_queue_sanity_check(SUBGRP_PKTQ(dest));
-
return count;
}
diff --git a/bgpd/bgp_updgrp.h b/bgpd/bgp_updgrp.h
index 7261933dc9..694636ef3d 100644
--- a/bgpd/bgp_updgrp.h
+++ b/bgpd/bgp_updgrp.h
@@ -109,12 +109,6 @@ struct bpacket {
struct bpacket_queue {
TAILQ_HEAD(pkt_queue, bpacket) pkts;
-#if 0
- /* A dummy packet that is used to thread all peers that have
- completed their work */
- struct bpacket sentinel;
-#endif
-
unsigned int conf_max_count;
unsigned int curr_count;
unsigned int hwm_count;
diff --git a/bgpd/bgp_updgrp_adv.c b/bgpd/bgp_updgrp_adv.c
index b1ff9ac251..fb64f010f9 100644
--- a/bgpd/bgp_updgrp_adv.c
+++ b/bgpd/bgp_updgrp_adv.c
@@ -95,6 +95,10 @@ static void adj_free(struct bgp_adj_out *adj)
{
TAILQ_REMOVE(&(adj->subgroup->adjq), adj, subgrp_adj_train);
SUBGRP_DECR_STAT(adj->subgroup, adj_count);
+
+ RB_REMOVE(bgp_adj_out_rb, &adj->dest->adj_out, adj);
+ bgp_dest_unlock_node(adj->dest);
+
XFREE(MTYPE_BGP_ADJ_OUT, adj);
}
@@ -402,11 +406,9 @@ struct bgp_adj_out *bgp_adj_out_alloc(struct update_subgroup *subgrp,
adj->subgroup = subgrp;
adj->addpath_tx_id = addpath_tx_id;
- if (dest) {
- RB_INSERT(bgp_adj_out_rb, &dest->adj_out, adj);
- bgp_dest_lock_node(dest);
- adj->dest = dest;
- }
+ RB_INSERT(bgp_adj_out_rb, &dest->adj_out, adj);
+ bgp_dest_lock_node(dest);
+ adj->dest = dest;
TAILQ_INSERT_TAIL(&(subgrp->adjq), adj, subgrp_adj_train);
SUBGRP_INCR_STAT(subgrp, adj_count);
@@ -518,10 +520,7 @@ void bgp_adj_out_set_subgroup(struct bgp_dest *dest,
/* bgp_path_info adj_out reference */
adv->pathi = bgp_path_info_lock(path);
- if (attr)
- adv->baa = bgp_advertise_intern(subgrp->hash, attr);
- else
- adv->baa = baa_new();
+ adv->baa = bgp_advertise_intern(subgrp->hash, attr);
adv->adj = adj;
adj->attr_hash = attr_hash;
@@ -604,13 +603,8 @@ void bgp_adj_out_unset_subgroup(struct bgp_dest *dest,
if (trigger_write)
subgroup_trigger_write(subgrp);
} else {
- /* Remove myself from adjacency. */
- RB_REMOVE(bgp_adj_out_rb, &dest->adj_out, adj);
-
/* Free allocated information. */
adj_free(adj);
-
- bgp_dest_unlock_node(dest);
}
}
@@ -626,7 +620,6 @@ void bgp_adj_out_remove_subgroup(struct bgp_dest *dest, struct bgp_adj_out *adj,
if (adj->adv)
bgp_advertise_clean_subgroup(subgrp, adj);
- RB_REMOVE(bgp_adj_out_rb, &dest->adj_out, adj);
adj_free(adj);
}
@@ -638,11 +631,8 @@ void subgroup_clear_table(struct update_subgroup *subgrp)
{
struct bgp_adj_out *aout, *taout;
- SUBGRP_FOREACH_ADJ_SAFE (subgrp, aout, taout) {
- struct bgp_dest *dest = aout->dest;
- bgp_adj_out_remove_subgroup(dest, aout, subgrp);
- bgp_dest_unlock_node(dest);
- }
+ SUBGRP_FOREACH_ADJ_SAFE (subgrp, aout, taout)
+ bgp_adj_out_remove_subgroup(aout->dest, aout, subgrp);
}
/*
@@ -930,14 +920,8 @@ void subgroup_default_originate(struct update_subgroup *subgrp, int withdraw)
bgp_advertise_clean_subgroup(
subgrp, adj);
- /* Remove from adjacency. */
- RB_REMOVE(bgp_adj_out_rb,
- &dest->adj_out, adj);
-
/* Free allocated information. */
adj_free(adj);
-
- bgp_dest_unlock_node(dest);
}
}
diff --git a/bgpd/bgp_updgrp_packet.c b/bgpd/bgp_updgrp_packet.c
index 866bf8178a..a13a5395b4 100644
--- a/bgpd/bgp_updgrp_packet.c
+++ b/bgpd/bgp_updgrp_packet.c
@@ -88,39 +88,6 @@ void bpacket_queue_init(struct bpacket_queue *q)
}
/*
- * bpacket_queue_sanity_check
- */
-void bpacket_queue_sanity_check(struct bpacket_queue __attribute__((__unused__))
- * q)
-{
-#if 0
- struct bpacket *pkt;
-
- pkt = bpacket_queue_last (q);
- assert (pkt);
- assert (!pkt->buffer);
-
- /*
- * Make sure the count of packets is correct.
- */
- int num_pkts = 0;
-
- pkt = bpacket_queue_first (q);
- while (pkt)
- {
- num_pkts++;
-
- if (num_pkts > q->curr_count)
- assert (0);
-
- pkt = TAILQ_NEXT (pkt, pkt_train);
- }
-
- assert (num_pkts == q->curr_count);
-#endif
-}
-
-/*
* bpacket_queue_add_packet
*
* Internal function of bpacket_queue - and adds a
@@ -168,7 +135,6 @@ struct bpacket *bpacket_queue_add(struct bpacket_queue *q, struct stream *s,
else
bpacket_attr_vec_arr_reset(&pkt->arr);
bpacket_queue_add_packet(q, pkt);
- bpacket_queue_sanity_check(q);
return pkt;
}
@@ -176,7 +142,6 @@ struct bpacket *bpacket_queue_add(struct bpacket_queue *q, struct stream *s,
* Fill in the new information into the current sentinel and create a
* new sentinel.
*/
- bpacket_queue_sanity_check(q);
last_pkt = bpacket_queue_last(q);
assert(last_pkt->buffer == NULL);
last_pkt->buffer = s;
@@ -190,7 +155,6 @@ struct bpacket *bpacket_queue_add(struct bpacket_queue *q, struct stream *s,
pkt->ver++;
bpacket_queue_add_packet(q, pkt);
- bpacket_queue_sanity_check(q);
return last_pkt;
}
@@ -290,7 +254,6 @@ static int bpacket_queue_compact(struct bpacket_queue *q)
num_deleted++;
}
- bpacket_queue_sanity_check(q);
return num_deleted;
}
@@ -1078,7 +1041,6 @@ struct bpacket *subgroup_withdraw_packet(struct update_subgroup *subgrp)
subgrp->scount--;
bgp_adj_out_remove_subgroup(dest, adj, subgrp);
- bgp_dest_unlock_node(dest);
}
if (!stream_empty(s)) {
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index e5f1b78b66..f8ef5e2aa2 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -127,6 +127,7 @@ FRR_CFG_DEFAULT_BOOL(BGP_SUPPRESS_DUPLICATES,
DEFINE_HOOK(bgp_inst_config_write,
(struct bgp *bgp, struct vty *vty),
(bgp, vty))
+DEFINE_HOOK(bgp_snmp_update_last_changed, (struct bgp *bgp), (bgp))
#define GR_NO_OPER \
"The Graceful Restart No Operation was executed as cmd same as previous one."
@@ -5019,7 +5020,7 @@ ALIAS_HIDDEN(neighbor_set_peer_group, neighbor_set_peer_group_hidden_cmd,
DEFUN_YANG (no_neighbor_set_peer_group,
no_neighbor_set_peer_group_cmd,
- "no neighbor <A.B.C.D|X:X::X:X|WORD> peer-group PGNAME",
+ "no neighbor <A.B.C.D|X:X::X:X|WORD> peer-group [PGNAME]",
NO_STR
NEIGHBOR_STR
NEIGHBOR_ADDR_STR2
@@ -5040,7 +5041,7 @@ DEFUN_YANG (no_neighbor_set_peer_group,
}
ALIAS_HIDDEN(no_neighbor_set_peer_group, no_neighbor_set_peer_group_hidden_cmd,
- "no neighbor <A.B.C.D|X:X::X:X|WORD> peer-group PGNAME",
+ "no neighbor <A.B.C.D|X:X::X:X|WORD> peer-group [PGNAME]",
NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2
"Member of the peer-group\n"
"Peer-group name\n")
@@ -5564,7 +5565,6 @@ DEFUN_YANG (neighbor_nexthop_self,
afi_t afi = bgp_node_afi(vty);
safi_t safi = bgp_node_safi(vty);
-
snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH,
yang_afi_safi_value2identity(afi, safi));
@@ -5603,7 +5603,6 @@ DEFUN_YANG(neighbor_nexthop_self_force,
afi_t afi = bgp_node_afi(vty);
safi_t safi = bgp_node_safi(vty);
-
snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH,
yang_afi_safi_value2identity(afi, safi));
@@ -5688,7 +5687,6 @@ DEFUN_YANG (no_neighbor_nexthop_self_force,
afi_t afi = bgp_node_afi(vty);
safi_t safi = bgp_node_safi(vty);
-
snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH,
yang_afi_safi_value2identity(afi, safi));
@@ -5846,7 +5844,6 @@ DEFUN_YANG (neighbor_remove_private_as_all,
afi_t afi = bgp_node_afi(vty);
safi_t safi = bgp_node_safi(vty);
-
snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH,
yang_afi_safi_value2identity(afi, safi));
@@ -5886,7 +5883,6 @@ DEFUN_YANG (neighbor_remove_private_as_replace_as,
afi_t afi = bgp_node_afi(vty);
safi_t safi = bgp_node_safi(vty);
-
snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH,
yang_afi_safi_value2identity(afi, safi));
@@ -5927,7 +5923,6 @@ DEFUN_YANG (neighbor_remove_private_as_all_replace_as,
afi_t afi = bgp_node_afi(vty);
safi_t safi = bgp_node_safi(vty);
-
snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH,
yang_afi_safi_value2identity(afi, safi));
@@ -6008,7 +6003,6 @@ DEFUN_YANG (no_neighbor_remove_private_as_all,
afi_t afi = bgp_node_afi(vty);
safi_t safi = bgp_node_safi(vty);
-
snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH,
yang_afi_safi_value2identity(afi, safi));
@@ -6049,7 +6043,6 @@ DEFUN_YANG (no_neighbor_remove_private_as_replace_as,
afi_t afi = bgp_node_afi(vty);
safi_t safi = bgp_node_safi(vty);
-
snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH,
yang_afi_safi_value2identity(afi, safi));
@@ -6091,7 +6084,6 @@ DEFUN_YANG (no_neighbor_remove_private_as_all_replace_as,
afi_t afi = bgp_node_afi(vty);
safi_t safi = bgp_node_safi(vty);
-
snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH,
yang_afi_safi_value2identity(afi, safi));
@@ -6381,7 +6373,6 @@ DEFUN_YANG (neighbor_soft_reconfiguration,
afi_t afi = bgp_node_afi(vty);
safi_t safi = bgp_node_safi(vty);
-
snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH,
yang_afi_safi_value2identity(afi, safi));
@@ -7768,69 +7759,44 @@ ALIAS_HIDDEN(
"Filter outgoing updates\n")
/* Set prefix list to the peer. */
-static int peer_prefix_list_set_vty(struct vty *vty, const char *ip_str,
- afi_t afi, safi_t safi,
- const char *name_str,
- const char *direct_str)
-{
- int ret;
- int direct = FILTER_IN;
- struct peer *peer;
-
- peer = peer_and_group_lookup_vty(vty, ip_str);
- if (!peer)
- return CMD_WARNING_CONFIG_FAILED;
-
- /* Check filter direction. */
- if (strncmp(direct_str, "i", 1) == 0)
- direct = FILTER_IN;
- else if (strncmp(direct_str, "o", 1) == 0)
- direct = FILTER_OUT;
-
- ret = peer_prefix_list_set(peer, afi, safi, direct, name_str);
-
- return bgp_vty_return(vty, ret);
-}
-
-static int peer_prefix_list_unset_vty(struct vty *vty, const char *ip_str,
- afi_t afi, safi_t safi,
- const char *direct_str)
+DEFPY_YANG(
+ neighbor_prefix_list, neighbor_prefix_list_cmd,
+ "[no$no] neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor_str prefix-list WORD$prefix_str <in|out>$direction",
+ NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2
+ "Filter updates to/from this neighbor\n"
+ "Name of a prefix list\n"
+ "Filter incoming updates\n"
+ "Filter outgoing updates\n")
{
- int ret;
- struct peer *peer;
- int direct = FILTER_IN;
+ char base_xpath[XPATH_MAXLEN];
+ char af_xpath[XPATH_MAXLEN];
+ char plist_xpath[XPATH_MAXLEN];
+ afi_t afi = bgp_node_afi(vty);
+ safi_t safi = bgp_node_safi(vty);
- peer = peer_and_group_lookup_vty(vty, ip_str);
- if (!peer)
+ snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH,
+ yang_afi_safi_value2identity(afi, safi));
+ if (peer_and_group_lookup_nb(vty, neighbor_str, base_xpath,
+ sizeof(base_xpath), af_xpath)
+ < 0)
return CMD_WARNING_CONFIG_FAILED;
- /* Check filter direction. */
- if (strncmp(direct_str, "i", 1) == 0)
- direct = FILTER_IN;
- else if (strncmp(direct_str, "o", 1) == 0)
- direct = FILTER_OUT;
-
- ret = peer_prefix_list_unset(peer, afi, safi, direct);
+ if (strmatch(direction, "in"))
+ snprintf(plist_xpath, sizeof(plist_xpath),
+ "./%s/filter-config/plist-import",
+ bgp_afi_safi_get_container_str(afi, safi));
+ else if (strmatch(direction, "out"))
+ snprintf(plist_xpath, sizeof(plist_xpath),
+ "./%s/filter-config/plist-export",
+ bgp_afi_safi_get_container_str(afi, safi));
- return bgp_vty_return(vty, ret);
-}
+ if (!no)
+ nb_cli_enqueue_change(vty, plist_xpath, NB_OP_MODIFY,
+ prefix_str);
+ else
+ nb_cli_enqueue_change(vty, plist_xpath, NB_OP_DESTROY, NULL);
-DEFUN (neighbor_prefix_list,
- neighbor_prefix_list_cmd,
- "neighbor <A.B.C.D|X:X::X:X|WORD> prefix-list WORD <in|out>",
- NEIGHBOR_STR
- NEIGHBOR_ADDR_STR2
- "Filter updates to/from this neighbor\n"
- "Name of a prefix list\n"
- "Filter incoming updates\n"
- "Filter outgoing updates\n")
-{
- int idx_peer = 1;
- int idx_word = 3;
- int idx_in_out = 4;
- return peer_prefix_list_set_vty(
- vty, argv[idx_peer]->arg, bgp_node_afi(vty), bgp_node_safi(vty),
- argv[idx_word]->arg, argv[idx_in_out]->arg);
+ return nb_cli_apply_changes(vty, base_xpath);
}
ALIAS_HIDDEN(neighbor_prefix_list, neighbor_prefix_list_hidden_cmd,
@@ -7841,32 +7807,6 @@ ALIAS_HIDDEN(neighbor_prefix_list, neighbor_prefix_list_hidden_cmd,
"Filter incoming updates\n"
"Filter outgoing updates\n")
-DEFUN (no_neighbor_prefix_list,
- no_neighbor_prefix_list_cmd,
- "no neighbor <A.B.C.D|X:X::X:X|WORD> prefix-list WORD <in|out>",
- NO_STR
- NEIGHBOR_STR
- NEIGHBOR_ADDR_STR2
- "Filter updates to/from this neighbor\n"
- "Name of a prefix list\n"
- "Filter incoming updates\n"
- "Filter outgoing updates\n")
-{
- int idx_peer = 2;
- int idx_in_out = 5;
- return peer_prefix_list_unset_vty(vty, argv[idx_peer]->arg,
- bgp_node_afi(vty), bgp_node_safi(vty),
- argv[idx_in_out]->arg);
-}
-
-ALIAS_HIDDEN(no_neighbor_prefix_list, no_neighbor_prefix_list_hidden_cmd,
- "no neighbor <A.B.C.D|X:X::X:X|WORD> prefix-list WORD <in|out>",
- NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2
- "Filter updates to/from this neighbor\n"
- "Name of a prefix list\n"
- "Filter incoming updates\n"
- "Filter outgoing updates\n")
-
static int peer_aslist_set_vty(struct vty *vty, const char *ip_str, afi_t afi,
safi_t safi, const char *name_str,
const char *direct_str)
@@ -8027,106 +7967,54 @@ ALIAS_HIDDEN(neighbor_advertise_map, neighbor_advertise_map_hidden_cmd,
"Name of the exist or non exist map\n")
/* Set route-map to the peer. */
-static int peer_route_map_set_vty(struct vty *vty, const char *ip_str,
- afi_t afi, safi_t safi, const char *name_str,
- const char *direct_str)
-{
- int ret;
- struct peer *peer;
- int direct = RMAP_IN;
- struct route_map *route_map;
-
- peer = peer_and_group_lookup_vty(vty, ip_str);
- if (!peer)
- return CMD_WARNING_CONFIG_FAILED;
-
- /* Check filter direction. */
- if (strncmp(direct_str, "in", 2) == 0)
- direct = RMAP_IN;
- else if (strncmp(direct_str, "o", 1) == 0)
- direct = RMAP_OUT;
-
- route_map = route_map_lookup_warn_noexist(vty, name_str);
- ret = peer_route_map_set(peer, afi, safi, direct, name_str, route_map);
-
- return bgp_vty_return(vty, ret);
-}
-
-static int peer_route_map_unset_vty(struct vty *vty, const char *ip_str,
- afi_t afi, safi_t safi,
- const char *direct_str)
+DEFPY_YANG(
+ neighbor_route_map, neighbor_route_map_cmd,
+ "[no$no] neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor_str route-map WORD$rmap_str <in|out>$direction",
+ NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2
+ "Apply route map to neighbor\n"
+ "Name of route map\n"
+ "Apply map to incoming routes\n"
+ "Apply map to outbound routes\n")
{
- int ret;
- struct peer *peer;
- int direct = RMAP_IN;
+ char base_xpath[XPATH_MAXLEN];
+ char af_xpath[XPATH_MAXLEN];
+ char rmap_xpath[XPATH_MAXLEN];
+ afi_t afi = bgp_node_afi(vty);
+ safi_t safi = bgp_node_safi(vty);
- peer = peer_and_group_lookup_vty(vty, ip_str);
- if (!peer)
+ snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH,
+ yang_afi_safi_value2identity(afi, safi));
+ if (peer_and_group_lookup_nb(vty, neighbor_str, base_xpath,
+ sizeof(base_xpath), af_xpath)
+ < 0)
return CMD_WARNING_CONFIG_FAILED;
- /* Check filter direction. */
- if (strncmp(direct_str, "in", 2) == 0)
- direct = RMAP_IN;
- else if (strncmp(direct_str, "o", 1) == 0)
- direct = RMAP_OUT;
-
- ret = peer_route_map_unset(peer, afi, safi, direct);
-
- return bgp_vty_return(vty, ret);
-}
+ if (strmatch(direction, "in"))
+ snprintf(rmap_xpath, sizeof(rmap_xpath),
+ "./%s/filter-config/rmap-import",
+ bgp_afi_safi_get_container_str(afi, safi));
+ else if (strmatch(direction, "out"))
+ snprintf(rmap_xpath, sizeof(rmap_xpath),
+ "./%s/filter-config/rmap-export",
+ bgp_afi_safi_get_container_str(afi, safi));
-DEFUN (neighbor_route_map,
- neighbor_route_map_cmd,
- "neighbor <A.B.C.D|X:X::X:X|WORD> route-map WORD <in|out>",
- NEIGHBOR_STR
- NEIGHBOR_ADDR_STR2
- "Apply route map to neighbor\n"
- "Name of route map\n"
- "Apply map to incoming routes\n"
- "Apply map to outbound routes\n")
-{
- int idx_peer = 1;
- int idx_word = 3;
- int idx_in_out = 4;
- return peer_route_map_set_vty(
- vty, argv[idx_peer]->arg, bgp_node_afi(vty), bgp_node_safi(vty),
- argv[idx_word]->arg, argv[idx_in_out]->arg);
-}
+ if (!no) {
+ if (!yang_dnode_exists(
+ vty->candidate_config->dnode,
+ "/frr-route-map:lib/route-map[name='%s']",
+ rmap_str)) {
+ if (vty_shell_serv(vty))
+ vty_out(vty,
+ "The route-map '%s' does not exist.\n",
+ rmap_str);
+ }
+ nb_cli_enqueue_change(vty, rmap_xpath, NB_OP_MODIFY, rmap_str);
+ } else
+ nb_cli_enqueue_change(vty, rmap_xpath, NB_OP_DESTROY, NULL);
-ALIAS_HIDDEN(neighbor_route_map, neighbor_route_map_hidden_cmd,
- "neighbor <A.B.C.D|X:X::X:X|WORD> route-map WORD <in|out>",
- NEIGHBOR_STR NEIGHBOR_ADDR_STR2
- "Apply route map to neighbor\n"
- "Name of route map\n"
- "Apply map to incoming routes\n"
- "Apply map to outbound routes\n")
-
-DEFUN (no_neighbor_route_map,
- no_neighbor_route_map_cmd,
- "no neighbor <A.B.C.D|X:X::X:X|WORD> route-map WORD <in|out>",
- NO_STR
- NEIGHBOR_STR
- NEIGHBOR_ADDR_STR2
- "Apply route map to neighbor\n"
- "Name of route map\n"
- "Apply map to incoming routes\n"
- "Apply map to outbound routes\n")
-{
- int idx_peer = 2;
- int idx_in_out = 5;
- return peer_route_map_unset_vty(vty, argv[idx_peer]->arg,
- bgp_node_afi(vty), bgp_node_safi(vty),
- argv[idx_in_out]->arg);
+ return nb_cli_apply_changes(vty, base_xpath);
}
-ALIAS_HIDDEN(no_neighbor_route_map, no_neighbor_route_map_hidden_cmd,
- "no neighbor <A.B.C.D|X:X::X:X|WORD> route-map WORD <in|out>",
- NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2
- "Apply route map to neighbor\n"
- "Name of route map\n"
- "Apply map to incoming routes\n"
- "Apply map to outbound routes\n")
-
/* Set unsuppress-map to the peer. */
static int peer_unsuppress_map_set_vty(struct vty *vty, const char *ip_str,
afi_t afi, safi_t safi,
@@ -9254,6 +9142,7 @@ DEFPY (af_label_vpn_export,
vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi,
bgp_get_default(), bgp);
+ hook_call(bgp_snmp_update_last_changed, bgp);
return CMD_SUCCESS;
}
@@ -10687,11 +10576,29 @@ static void bgp_show_failed_summary(struct vty *vty, struct bgp *bgp,
}
}
+/* If the peer's description includes whitespaces
+ * then return the first occurrence. Also strip description
+ * to the given size if needed.
+ */
+static char *bgp_peer_description_stripped(char *desc, uint32_t size)
+{
+ static char stripped[BUFSIZ];
+ char *pnt;
+ uint32_t len = size > strlen(desc) ? strlen(desc) : size;
+
+ pnt = strchr(desc, ' ');
+ if (pnt)
+ len = size > (uint32_t)(pnt - desc) ? (uint32_t)(pnt - desc)
+ : size;
+
+ strlcpy(stripped, desc, len + 1);
+
+ return stripped;
+}
/* Show BGP peer's summary information. */
static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
- bool show_failed, bool show_established,
- bool use_json)
+ uint8_t show_flags)
{
struct peer *peer;
struct listnode *node, *nnode;
@@ -10707,6 +10614,11 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
json_object *json_peers = NULL;
struct peer_af *paf;
struct bgp_filter *filter;
+ bool use_json = CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON);
+ bool show_failed = CHECK_FLAG(show_flags, BGP_SHOW_OPT_FAILED);
+ bool show_established =
+ CHECK_FLAG(show_flags, BGP_SHOW_OPT_ESTABLISHED);
+ bool show_wide = CHECK_FLAG(show_flags, BGP_SHOW_OPT_WIDE);
/* labeled-unicast routes are installed in the unicast table so in order
* to
@@ -11003,10 +10915,13 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
vty_out(vty, "%*s", max_neighbor_width - 8,
" ");
if (show_failed)
- vty_out(vty, "EstdCnt DropCnt ResetTime Reason\n");
+ vty_out(vty,
+ BGP_SHOW_SUMMARY_HEADER_FAILED);
else
vty_out(vty,
- "V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt Desc\n");
+ show_wide
+ ? BGP_SHOW_SUMMARY_HEADER_ALL_WIDE
+ : BGP_SHOW_SUMMARY_HEADER_ALL);
}
}
@@ -11046,6 +10961,11 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
peer->domainname);
json_object_int_add(json_peer, "remoteAs", peer->as);
+ json_object_int_add(
+ json_peer, "localAs",
+ peer->change_local_as
+ ? peer->change_local_as
+ : peer->local_as);
json_object_int_add(json_peer, "version", 4);
json_object_int_add(json_peer, "msgRcvd",
PEER_TOTAL_RX(peer));
@@ -11202,14 +11122,33 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
&peer->ibuf->count,
memory_order_relaxed);
- vty_out(vty,
- "4 %10u %9u %9u %8" PRIu64" %4zu %4zu %8s",
- peer->as, PEER_TOTAL_RX(peer),
- PEER_TOTAL_TX(peer),
- peer->version[afi][safi], inq_count,
- outq_count,
- peer_uptime(peer->uptime, timebuf,
- BGP_UPTIME_LEN, 0, NULL));
+ if (show_wide)
+ vty_out(vty,
+ "4 %10u %10u %9u %9u %8" PRIu64
+ " %4zu %4zu %8s",
+ peer->as,
+ peer->change_local_as
+ ? peer->change_local_as
+ : peer->local_as,
+ PEER_TOTAL_RX(peer),
+ PEER_TOTAL_TX(peer),
+ peer->version[afi][safi],
+ inq_count, outq_count,
+ peer_uptime(peer->uptime,
+ timebuf,
+ BGP_UPTIME_LEN, 0,
+ NULL));
+ else
+ vty_out(vty, "4 %10u %9u %9u %8" PRIu64
+ " %4zu %4zu %8s",
+ peer->as, PEER_TOTAL_RX(peer),
+ PEER_TOTAL_TX(peer),
+ peer->version[afi][safi],
+ inq_count, outq_count,
+ peer_uptime(peer->uptime,
+ timebuf,
+ BGP_UPTIME_LEN, 0,
+ NULL));
if (peer->status == Established) {
if (peer->afc_recv[afi][safi]) {
@@ -11227,7 +11166,7 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
[afi]
[pfx_rcd_safi]);
} else {
- vty_out(vty, " NoNeg");
+ vty_out(vty, " NoNeg");
}
if (paf && PAF_SUBGRP(paf)) {
@@ -11244,6 +11183,8 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
(PAF_SUBGRP(
paf))
->scount);
+ } else {
+ vty_out(vty, " NoNeg");
}
} else {
if (CHECK_FLAG(peer->flags,
@@ -11263,7 +11204,10 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
vty_out(vty, " %8u", 0);
}
if (peer->desc)
- vty_out(vty, " %s", peer->desc);
+ vty_out(vty, " %s",
+ bgp_peer_description_stripped(
+ peer->desc,
+ show_wide ? 64 : 20));
else
vty_out(vty, " N/A");
vty_out(vty, "\n");
@@ -11303,14 +11247,14 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
}
static void bgp_show_summary_afi_safi(struct vty *vty, struct bgp *bgp, int afi,
- int safi, bool show_failed,
- bool show_established, bool use_json)
+ int safi, uint8_t show_flags)
{
int is_first = 1;
int afi_wildcard = (afi == AFI_MAX);
int safi_wildcard = (safi == SAFI_MAX);
int is_wildcard = (afi_wildcard || safi_wildcard);
bool nbr_output = false;
+ bool use_json = CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON);
if (use_json && is_wildcard)
vty_out(vty, "{\n");
@@ -11348,8 +11292,7 @@ static void bgp_show_summary_afi_safi(struct vty *vty, struct bgp *bgp, int afi,
}
}
bgp_show_summary(vty, bgp, afi, safi,
- show_failed, show_established,
- use_json);
+ show_flags);
}
safi++;
if (!safi_wildcard)
@@ -11371,14 +11314,13 @@ static void bgp_show_summary_afi_safi(struct vty *vty, struct bgp *bgp, int afi,
}
static void bgp_show_all_instances_summary_vty(struct vty *vty, afi_t afi,
- safi_t safi, bool show_failed,
- bool show_established,
- bool use_json)
+ safi_t safi, uint8_t show_flags)
{
struct listnode *node, *nnode;
struct bgp *bgp;
int is_first = 1;
bool nbr_output = false;
+ bool use_json = CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON);
if (use_json)
vty_out(vty, "{\n");
@@ -11401,8 +11343,7 @@ static void bgp_show_all_instances_summary_vty(struct vty *vty, afi_t afi,
? VRF_DEFAULT_NAME
: bgp->name);
}
- bgp_show_summary_afi_safi(vty, bgp, afi, safi, show_failed,
- show_established, use_json);
+ bgp_show_summary_afi_safi(vty, bgp, afi, safi, show_flags);
}
if (use_json)
@@ -11412,16 +11353,15 @@ static void bgp_show_all_instances_summary_vty(struct vty *vty, afi_t afi,
}
int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
- safi_t safi, bool show_failed, bool show_established,
- bool use_json)
+ safi_t safi, uint8_t show_flags)
{
struct bgp *bgp;
+ bool use_json = CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON);
if (name) {
if (strmatch(name, "all")) {
- bgp_show_all_instances_summary_vty(
- vty, afi, safi, show_failed, show_established,
- use_json);
+ bgp_show_all_instances_summary_vty(vty, afi, safi,
+ show_flags);
return CMD_SUCCESS;
} else {
bgp = bgp_lookup_by_name(name);
@@ -11436,8 +11376,7 @@ int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
}
bgp_show_summary_afi_safi(vty, bgp, afi, safi,
- show_failed, show_established,
- use_json);
+ show_flags);
return CMD_SUCCESS;
}
}
@@ -11445,8 +11384,7 @@ int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
bgp = bgp_get_default();
if (bgp)
- bgp_show_summary_afi_safi(vty, bgp, afi, safi, show_failed,
- show_established, use_json);
+ bgp_show_summary_afi_safi(vty, bgp, afi, safi, show_flags);
else {
if (use_json)
vty_out(vty, "{}\n");
@@ -11461,7 +11399,7 @@ int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
/* `show [ip] bgp summary' commands. */
DEFPY (show_ip_bgp_summary,
show_ip_bgp_summary_cmd,
- "show [ip] bgp [<view|vrf> VIEWVRFNAME] ["BGP_AFI_CMD_STR" ["BGP_SAFI_WITH_LABEL_CMD_STR"]] [all$all] summary [established|failed] [json$uj]",
+ "show [ip] bgp [<view|vrf> VIEWVRFNAME] ["BGP_AFI_CMD_STR" ["BGP_SAFI_WITH_LABEL_CMD_STR"]] [all$all] summary [established|failed] [wide] [json$uj]",
SHOW_STR
IP_STR
BGP_STR
@@ -11472,13 +11410,13 @@ DEFPY (show_ip_bgp_summary,
"Summary of BGP neighbor status\n"
"Show only sessions in Established state\n"
"Show only sessions not in Established state\n"
+ "Increase table width for longer output\n"
JSON_STR)
{
char *vrf = NULL;
afi_t afi = AFI_MAX;
safi_t safi = SAFI_MAX;
- bool show_failed = false;
- bool show_established = false;
+ uint8_t show_flags = 0;
int idx = 0;
@@ -11499,12 +11437,18 @@ DEFPY (show_ip_bgp_summary,
}
if (argv_find(argv, argc, "failed", &idx))
- show_failed = true;
+ SET_FLAG(show_flags, BGP_SHOW_OPT_FAILED);
+
if (argv_find(argv, argc, "established", &idx))
- show_established = true;
+ SET_FLAG(show_flags, BGP_SHOW_OPT_ESTABLISHED);
+
+ if (argv_find(argv, argc, "wide", &idx))
+ SET_FLAG(show_flags, BGP_SHOW_OPT_WIDE);
+
+ if (argv_find(argv, argc, "json", &idx))
+ SET_FLAG(show_flags, BGP_SHOW_OPT_JSON);
- return bgp_show_summary_vty(vty, vrf, afi, safi, show_failed,
- show_established, uj);
+ return bgp_show_summary_vty(vty, vrf, afi, safi, show_flags);
}
const char *get_afi_safi_str(afi_t afi, safi_t safi, bool for_json)
@@ -18667,27 +18611,16 @@ void bgp_vty_init(void)
/* "neighbor prefix-list" commands. */
install_element(BGP_NODE, &neighbor_prefix_list_hidden_cmd);
- install_element(BGP_NODE, &no_neighbor_prefix_list_hidden_cmd);
install_element(BGP_IPV4_NODE, &neighbor_prefix_list_cmd);
- install_element(BGP_IPV4_NODE, &no_neighbor_prefix_list_cmd);
install_element(BGP_IPV4M_NODE, &neighbor_prefix_list_cmd);
- install_element(BGP_IPV4M_NODE, &no_neighbor_prefix_list_cmd);
install_element(BGP_IPV4L_NODE, &neighbor_prefix_list_cmd);
- install_element(BGP_IPV4L_NODE, &no_neighbor_prefix_list_cmd);
install_element(BGP_IPV6_NODE, &neighbor_prefix_list_cmd);
- install_element(BGP_IPV6_NODE, &no_neighbor_prefix_list_cmd);
install_element(BGP_IPV6M_NODE, &neighbor_prefix_list_cmd);
- install_element(BGP_IPV6M_NODE, &no_neighbor_prefix_list_cmd);
install_element(BGP_IPV6L_NODE, &neighbor_prefix_list_cmd);
- install_element(BGP_IPV6L_NODE, &no_neighbor_prefix_list_cmd);
install_element(BGP_VPNV4_NODE, &neighbor_prefix_list_cmd);
- install_element(BGP_VPNV4_NODE, &no_neighbor_prefix_list_cmd);
install_element(BGP_VPNV6_NODE, &neighbor_prefix_list_cmd);
- install_element(BGP_VPNV6_NODE, &no_neighbor_prefix_list_cmd);
install_element(BGP_FLOWSPECV4_NODE, &neighbor_prefix_list_cmd);
- install_element(BGP_FLOWSPECV4_NODE, &no_neighbor_prefix_list_cmd);
install_element(BGP_FLOWSPECV6_NODE, &neighbor_prefix_list_cmd);
- install_element(BGP_FLOWSPECV6_NODE, &no_neighbor_prefix_list_cmd);
/* "neighbor filter-list" commands. */
install_element(BGP_NODE, &neighbor_filter_list_hidden_cmd);
@@ -18714,30 +18647,17 @@ void bgp_vty_init(void)
install_element(BGP_FLOWSPECV6_NODE, &no_neighbor_filter_list_cmd);
/* "neighbor route-map" commands. */
- install_element(BGP_NODE, &neighbor_route_map_hidden_cmd);
- install_element(BGP_NODE, &no_neighbor_route_map_hidden_cmd);
install_element(BGP_IPV4_NODE, &neighbor_route_map_cmd);
- install_element(BGP_IPV4_NODE, &no_neighbor_route_map_cmd);
install_element(BGP_IPV4M_NODE, &neighbor_route_map_cmd);
- install_element(BGP_IPV4M_NODE, &no_neighbor_route_map_cmd);
install_element(BGP_IPV4L_NODE, &neighbor_route_map_cmd);
- install_element(BGP_IPV4L_NODE, &no_neighbor_route_map_cmd);
install_element(BGP_IPV6_NODE, &neighbor_route_map_cmd);
- install_element(BGP_IPV6_NODE, &no_neighbor_route_map_cmd);
install_element(BGP_IPV6M_NODE, &neighbor_route_map_cmd);
- install_element(BGP_IPV6M_NODE, &no_neighbor_route_map_cmd);
install_element(BGP_IPV6L_NODE, &neighbor_route_map_cmd);
- install_element(BGP_IPV6L_NODE, &no_neighbor_route_map_cmd);
install_element(BGP_VPNV4_NODE, &neighbor_route_map_cmd);
- install_element(BGP_VPNV4_NODE, &no_neighbor_route_map_cmd);
install_element(BGP_VPNV6_NODE, &neighbor_route_map_cmd);
- install_element(BGP_VPNV6_NODE, &no_neighbor_route_map_cmd);
install_element(BGP_FLOWSPECV4_NODE, &neighbor_route_map_cmd);
- install_element(BGP_FLOWSPECV4_NODE, &no_neighbor_route_map_cmd);
install_element(BGP_FLOWSPECV6_NODE, &neighbor_route_map_cmd);
- install_element(BGP_FLOWSPECV6_NODE, &no_neighbor_route_map_cmd);
install_element(BGP_EVPN_NODE, &neighbor_route_map_cmd);
- install_element(BGP_EVPN_NODE, &no_neighbor_route_map_cmd);
/* "neighbor unsuppress-map" commands. */
install_element(BGP_NODE, &neighbor_unsuppress_map_hidden_cmd);
diff --git a/bgpd/bgp_vty.h b/bgpd/bgp_vty.h
index 07f61ab0ea..85619dd074 100644
--- a/bgpd/bgp_vty.h
+++ b/bgpd/bgp_vty.h
@@ -53,6 +53,12 @@ struct bgp;
" Helper - GR Mode-Helper,\n" \
" Disable - GR Mode-Disable.\n\n"
+#define BGP_SHOW_SUMMARY_HEADER_ALL \
+ "V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt Desc\n"
+#define BGP_SHOW_SUMMARY_HEADER_ALL_WIDE \
+ "V AS LocalAS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt Desc\n"
+#define BGP_SHOW_SUMMARY_HEADER_FAILED "EstdCnt DropCnt ResetTime Reason\n"
+
#define BGP_SHOW_PEER_GR_CAPABILITY( \
vty, p, use_json, json) \
do { \
@@ -178,8 +184,7 @@ extern int bgp_vty_find_and_parse_afi_safi_bgp(struct vty *vty,
int bgp_vty_find_and_parse_bgp(struct vty *vty, struct cmd_token **argv,
int argc, struct bgp **bgp, bool use_json);
extern int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
- safi_t safi, bool show_failed,
- bool show_established, bool use_json);
+ safi_t safi, uint8_t show_flags);
extern int bgp_clear_star_soft_in(const char *name, char *errmsg,
size_t errmsg_len);
extern int bgp_clear_star_soft_out(const char *name, char *errmsg,
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index f7c4b04adf..8d03079fd7 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -67,6 +67,10 @@
/* All information about zebra. */
struct zclient *zclient = NULL;
+/* hook to indicate vrf status change for SNMP */
+DEFINE_HOOK(bgp_vrf_status_changed, (struct bgp *bgp, struct interface *ifp),
+ (bgp, ifp))
+
/* Can we install into zebra? */
static inline bool bgp_install_info_to_zebra(struct bgp *bgp)
{
@@ -212,8 +216,10 @@ static int bgp_ifp_destroy(struct interface *ifp)
if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("Rx Intf del VRF %u IF %s", ifp->vrf_id, ifp->name);
- if (bgp)
+ if (bgp) {
bgp_update_interface_nbrs(bgp, ifp, NULL);
+ hook_call(bgp_vrf_status_changed, bgp, ifp);
+ }
bgp_mac_del_mac_entry(ifp);
@@ -243,6 +249,7 @@ static int bgp_ifp_up(struct interface *ifp)
for (ALL_LIST_ELEMENTS(ifp->nbr_connected, node, nnode, nc))
bgp_nbr_connected_add(bgp, nc);
+ hook_call(bgp_vrf_status_changed, bgp, ifp);
return 0;
}
@@ -297,6 +304,7 @@ static int bgp_ifp_down(struct interface *ifp)
}
}
+ hook_call(bgp_vrf_status_changed, bgp, ifp);
return 0;
}
@@ -461,6 +469,8 @@ static int bgp_interface_vrf_update(ZAPI_CALLBACK_ARGS)
for (ALL_LIST_ELEMENTS(ifp->nbr_connected, node, nnode, nc))
bgp_nbr_connected_add(bgp, nc);
+
+ hook_call(bgp_vrf_status_changed, bgp, ifp);
return 0;
}
@@ -896,6 +906,7 @@ bgp_path_info_to_ipv6_nexthop(struct bgp_path_info *path, ifindex_t *ifindex)
/* Workaround for Cisco's nexthop bug. */
if (IN6_IS_ADDR_UNSPECIFIED(
&path->attr->mp_nexthop_global)
+ && path->peer->su_remote
&& path->peer->su_remote->sa.sa_family
== AF_INET6) {
nexthop =
@@ -943,8 +954,11 @@ static bool bgp_table_map_apply(struct route_map *map, const struct prefix *p,
zlog_debug(
"Zebra rmap deny: IPv6 route %pFX nexthop %s",
p,
- inet_ntop(AF_INET6, nexthop, buf[1],
- sizeof(buf[1])));
+ nexthop ? inet_ntop(AF_INET6, nexthop, buf[1],
+ sizeof(buf[1]))
+ : inet_ntop(AF_INET,
+ &path->attr->nexthop,
+ buf[1], sizeof(buf[1])));
}
}
return false;
@@ -2963,6 +2977,7 @@ static int bgp_ifp_create(struct interface *ifp)
bgp_mac_add_mac_entry(ifp);
bgp_update_interface_nbrs(bgp, ifp, ifp);
+ hook_call(bgp_vrf_status_changed, bgp, ifp);
return 0;
}
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 368397d7aa..b11fd5288a 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -3704,6 +3704,7 @@ void bgp_free(struct bgp *bgp)
XFREE(MTYPE_BGP, bgp->name);
XFREE(MTYPE_BGP, bgp->name_pretty);
+ XFREE(MTYPE_BGP, bgp->snmp_stats);
XFREE(MTYPE_BGP, bgp);
}
@@ -5216,10 +5217,10 @@ int peer_timers_set(struct peer *peer, uint32_t keepalive, uint32_t holdtime)
struct peer *member;
struct listnode *node, *nnode;
- if (keepalive > 65535)
+ if (keepalive > UINT16_MAX)
return BGP_ERR_INVALID_VALUE;
- if (holdtime > 65535)
+ if (holdtime > UINT16_MAX)
return BGP_ERR_INVALID_VALUE;
if (holdtime < 3 && holdtime != 0)
@@ -5296,7 +5297,7 @@ int peer_timers_connect_set(struct peer *peer, uint32_t connect)
struct peer *member;
struct listnode *node, *nnode;
- if (connect > 65535)
+ if (connect > UINT16_MAX)
return BGP_ERR_INVALID_VALUE;
/* Set flag and configuration on peer. */
@@ -5305,9 +5306,14 @@ int peer_timers_connect_set(struct peer *peer, uint32_t connect)
peer->v_connect = connect;
/* Skip peer-group mechanics for regular peers. */
- if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP))
+ if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ if (peer->status != Established) {
+ if (peer_active(peer))
+ BGP_EVENT_ADD(peer, BGP_Stop);
+ BGP_EVENT_ADD(peer, BGP_Start);
+ }
return 0;
-
+ }
/*
* Set flag and configuration on all peer-group members, unless they are
* explicitely overriding peer-group configuration.
@@ -5321,6 +5327,12 @@ int peer_timers_connect_set(struct peer *peer, uint32_t connect)
SET_FLAG(member->flags, PEER_FLAG_TIMER_CONNECT);
member->connect = connect;
member->v_connect = connect;
+
+ if (member->status != Established) {
+ if (peer_active(member))
+ BGP_EVENT_ADD(member, BGP_Stop);
+ BGP_EVENT_ADD(member, BGP_Start);
+ }
}
return 0;
@@ -5348,9 +5360,14 @@ int peer_timers_connect_unset(struct peer *peer)
peer->v_connect = peer->bgp->default_connect_retry;
/* Skip peer-group mechanics for regular peers. */
- if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP))
+ if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ if (peer->status != Established) {
+ if (peer_active(peer))
+ BGP_EVENT_ADD(peer, BGP_Stop);
+ BGP_EVENT_ADD(peer, BGP_Start);
+ }
return 0;
-
+ }
/*
* Remove flag and configuration from all peer-group members, unless
* they are explicitely overriding peer-group configuration.
@@ -5364,6 +5381,12 @@ int peer_timers_connect_unset(struct peer *peer)
UNSET_FLAG(member->flags, PEER_FLAG_TIMER_CONNECT);
member->connect = 0;
member->v_connect = peer->bgp->default_connect_retry;
+
+ if (member->status != Established) {
+ if (peer_active(member))
+ BGP_EVENT_ADD(member, BGP_Stop);
+ BGP_EVENT_ADD(member, BGP_Start);
+ }
}
return 0;
@@ -5684,11 +5707,6 @@ int peer_allowas_in_unset(struct peer *peer, afi_t afi, safi_t safi)
PEER_FLAG_ALLOWAS_IN))
continue;
- /* Skip peers where flag is already disabled. */
- if (!CHECK_FLAG(member->af_flags[afi][safi],
- PEER_FLAG_ALLOWAS_IN))
- continue;
-
/* Remove flags and configuration on peer-group member. */
UNSET_FLAG(member->af_flags[afi][safi], PEER_FLAG_ALLOWAS_IN);
UNSET_FLAG(member->af_flags[afi][safi],
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 9089608062..7a8f99163e 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -310,6 +310,15 @@ enum bgp_link_bw_handling {
RB_HEAD(bgp_es_vrf_rb_head, bgp_evpn_es_vrf);
RB_PROTOTYPE(bgp_es_vrf_rb_head, bgp_evpn_es_vrf, rb_node, bgp_es_vrf_rb_cmp);
+struct bgp_snmp_stats {
+ /* SNMP variables for mplsL3Vpn*/
+ time_t creation_time;
+ time_t modify_time;
+ bool active;
+ uint32_t routes_added;
+ uint32_t routes_deleted;
+};
+
/* BGP instance structure. */
struct bgp {
/* AS number of this BGP instance. */
@@ -363,6 +372,8 @@ struct bgp {
uint32_t subgrps_deleted;
} update_group_stats;
+ struct bgp_snmp_stats *snmp_stats;
+
/* BGP configuration. */
uint16_t config;
#define BGP_CONFIG_CLUSTER_ID (1 << 0)
@@ -910,7 +921,47 @@ struct bgp_peer_gr {
bgp_peer_gr_action_ptr action_fun;
};
-/* BGP finite state machine events. */
+/*
+ * BGP FSM event codes, per RFC 4271 ss. 8.1
+ */
+enum bgp_fsm_rfc_codes {
+ BGP_FSM_ManualStart = 1,
+ BGP_FSM_ManualStop = 2,
+ BGP_FSM_AutomaticStart = 3,
+ BGP_FSM_ManualStart_with_PassiveTcpEstablishment = 4,
+ BGP_FSM_AutomaticStart_with_PassiveTcpEstablishment = 5,
+ BGP_FSM_AutomaticStart_with_DampPeerOscillations = 6,
+ BGP_FSM_AutomaticStart_with_DampPeerOscillations_and_PassiveTcpEstablishment =
+ 7,
+ BGP_FSM_AutomaticStop = 8,
+ BGP_FSM_ConnectRetryTimer_Expires = 9,
+ BGP_FSM_HoldTimer_Expires = 10,
+ BGP_FSM_KeepaliveTimer_Expires = 11,
+ BGP_FSM_DelayOpenTimer_Expires = 12,
+ BGP_FSM_IdleHoldTimer_Expires = 13,
+ BGP_FSM_TcpConnection_Valid = 14,
+ BGP_FSM_Tcp_CR_Invalid = 15,
+ BGP_FSM_Tcp_CR_Acked = 16,
+ BGP_FSM_TcpConnectionConfirmed = 17,
+ BGP_FSM_TcpConnectionFails = 18,
+ BGP_FSM_BGPOpen = 19,
+ BGP_FSM_BGPOpen_with_DelayOpenTimer_running = 20,
+ BGP_FSM_BGPHeaderErr = 21,
+ BGP_FSM_BGPOpenMsgErr = 22,
+ BGP_FSM_OpenCollisionDump = 23,
+ BGP_FSM_NotifMsgVerErr = 24,
+ BGP_FSM_NotifMsg = 25,
+ BGP_FSM_KeepAliveMsg = 26,
+ BGP_FSM_UpdateMsg = 27,
+ BGP_FSM_UpdateMsgErr = 28
+};
+
+/*
+ * BGP finite state machine events
+ *
+ * Note: these do not correspond to RFC-defined event codes. Those are
+ * defined elsewhere.
+ */
enum bgp_fsm_events {
BGP_Start = 1,
BGP_Stop,
@@ -2246,6 +2297,27 @@ static inline struct vrf *bgp_vrf_lookup_by_instance_type(struct bgp *bgp)
return vrf;
}
+static inline uint32_t bgp_vrf_interfaces(struct bgp *bgp, bool active)
+{
+ struct vrf *vrf;
+ struct interface *ifp;
+ uint32_t count = 0;
+
+ /* if there is one interface in the vrf which is up then it is deemed
+ * active
+ */
+ vrf = bgp_vrf_lookup_by_instance_type(bgp);
+ if (vrf == NULL)
+ return 0;
+ RB_FOREACH (ifp, if_name_head, &vrf->ifaces_by_name) {
+ if (strncmp(ifp->name, bgp->name, VRF_NAMSIZ) == 0)
+ continue;
+ if (!active || if_is_up(ifp))
+ count++;
+ }
+ return count;
+}
+
/* Link BGP instance to VRF. */
static inline void bgp_vrf_link(struct bgp *bgp, struct vrf *vrf)
{
@@ -2283,7 +2355,14 @@ extern int bgp_lookup_by_as_name_type(struct bgp **bgp_val, as_t *as,
enum bgp_instance_type inst_type);
/* Hooks */
-DECLARE_HOOK(peer_status_changed, (struct peer * peer), (peer))
+DECLARE_HOOK(bgp_vrf_status_changed, (struct bgp *bgp, struct interface *ifp),
+ (bgp, ifp))
+DECLARE_HOOK(peer_status_changed, (struct peer *peer), (peer))
+DECLARE_HOOK(bgp_snmp_init_stats, (struct bgp *bgp), (bgp))
+DECLARE_HOOK(bgp_snmp_update_last_changed, (struct bgp *bgp), (bgp))
+DECLARE_HOOK(bgp_snmp_update_stats,
+ (struct bgp_node *rn, struct bgp_path_info *pi, bool added),
+ (rn, pi, added))
void peer_nsf_stop(struct peer *peer);
#endif /* _QUAGGA_BGPD_H */
diff --git a/bgpd/rfapi/rfapi.c b/bgpd/rfapi/rfapi.c
index f7bbd44512..8c455c6ea5 100644
--- a/bgpd/rfapi/rfapi.c
+++ b/bgpd/rfapi/rfapi.c
@@ -578,9 +578,6 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */
struct bgp_attr_encap_subtlv *encaptlv;
char buf[PREFIX_STRLEN];
char buf2[RD_ADDRSTRLEN];
-#if 0 /* unused? */
- struct prefix pfx_buf;
-#endif
struct rfapi_nexthop *lnh = NULL; /* local nexthop */
struct rfapi_vn_option *vo;
@@ -603,20 +600,6 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */
return;
}
-#if 0 /* unused? */
- if ((safi == SAFI_MPLS_VPN) && (flags & RFAPI_AHR_SET_PFX_TO_NEXTHOP))
- {
-
- if (rfapiRaddr2Qprefix (nexthop, &pfx_buf))
- {
- vnc_zlog_debug_verbose
- ("%s: can't set pfx to vn addr, not adding SAFI_MPLS_VPN route",
- __func__);
- return;
- }
- p = &pfx_buf;
- }
-#endif
for (vo = options_vn; vo; vo = vo->next) {
if (RFAPI_VN_OPTION_TYPE_L2ADDR == vo->type) {
l2o = &vo->v.l2addr;
diff --git a/bgpd/rfapi/rfapi_import.c b/bgpd/rfapi/rfapi_import.c
index 3d87b63542..b2732a40b4 100644
--- a/bgpd/rfapi/rfapi_import.c
+++ b/bgpd/rfapi/rfapi_import.c
@@ -984,7 +984,7 @@ static int rfapiEcommunitiesMatchBeec(struct ecommunity *ecom,
int rfapiEcommunitiesIntersect(struct ecommunity *e1, struct ecommunity *e2)
{
- int i, j;
+ uint32_t i, j;
if (!e1 || !e2)
return 0;
@@ -1014,7 +1014,8 @@ int rfapiEcommunitiesIntersect(struct ecommunity *e1, struct ecommunity *e2)
int rfapiEcommunityGetLNI(struct ecommunity *ecom, uint32_t *lni)
{
if (ecom) {
- int i;
+ uint32_t i;
+
for (i = 0; i < ecom->size; ++i) {
uint8_t *p = ecom->val + (i * ECOMMUNITY_SIZE);
@@ -1034,7 +1035,8 @@ int rfapiEcommunityGetEthernetTag(struct ecommunity *ecom, uint16_t *tag_id)
struct bgp *bgp = bgp_get_default();
*tag_id = 0; /* default to untagged */
if (ecom) {
- int i;
+ uint32_t i;
+
for (i = 0; i < ecom->size; ++i) {
as_t as = 0;
int encode = 0;
diff --git a/bgpd/rfapi/rfapi_private.h b/bgpd/rfapi/rfapi_private.h
index e24d62def4..af56367955 100644
--- a/bgpd/rfapi/rfapi_private.h
+++ b/bgpd/rfapi/rfapi_private.h
@@ -289,9 +289,6 @@ add_vnc_route(struct rfapi_descriptor *rfd, /* cookie + UN addr for VPN */
uint8_t type, uint8_t sub_type, int flags);
#define RFAPI_AHR_NO_TUNNEL_SUBTLV 0x00000001
#define RFAPI_AHR_RFPOPT_IS_VNCTLV 0x00000002 /* hack! */
-#if 0 /* unused? */
-# define RFAPI_AHR_SET_PFX_TO_NEXTHOP 0x00000004
-#endif
extern void del_vnc_route(struct rfapi_descriptor *rfd, struct peer *peer,
struct bgp *bgp, safi_t safi, const struct prefix *p,
diff --git a/bgpd/rfapi/vnc_export_bgp.c b/bgpd/rfapi/vnc_export_bgp.c
index 762cd2596f..c90fcf8d72 100644
--- a/bgpd/rfapi/vnc_export_bgp.c
+++ b/bgpd/rfapi/vnc_export_bgp.c
@@ -134,7 +134,7 @@ static void encap_attr_export_ce(struct attr *new, struct attr *orig,
static int getce(struct bgp *bgp, struct attr *attr, struct prefix *pfx_ce)
{
uint8_t *ecp;
- int i;
+ uint32_t i;
uint16_t localadmin = bgp->rfapi_cfg->resolve_nve_roo_local_admin;
for (ecp = attr->ecommunity->val, i = 0; i < attr->ecommunity->size;
@@ -1177,19 +1177,19 @@ static void vnc_direct_add_rn_group_rd(struct bgp *bgp,
if (!rfg->rt_export_list || !rfg->rfapi_import_table) {
vnc_zlog_debug_verbose(
- "%s: VRF \"%s\" is missing RT import/export configuration.\n",
+ "%s: VRF \"%s\" is missing RT import/export configuration.",
__func__, rfg->name);
return;
}
if (!rfg->rd.prefixlen) {
vnc_zlog_debug_verbose(
- "%s: VRF \"%s\" is missing RD configuration.\n",
+ "%s: VRF \"%s\" is missing RD configuration.",
__func__, rfg->name);
return;
}
if (rfg->label > MPLS_LABEL_MAX) {
vnc_zlog_debug_verbose(
- "%s: VRF \"%s\" is missing default label configuration.\n",
+ "%s: VRF \"%s\" is missing default label configuration.",
__func__, rfg->name);
return;
}
diff --git a/bgpd/subdir.am b/bgpd/subdir.am
index ac84f4b9e4..4614363bf0 100644
--- a/bgpd/subdir.am
+++ b/bgpd/subdir.am
@@ -96,6 +96,7 @@ bgpd_libbgp_a_SOURCES = \
bgpd/bgp_regex.c \
bgpd/bgp_route.c \
bgpd/bgp_routemap.c \
+ bgpd/bgp_script.c \
bgpd/bgp_table.c \
bgpd/bgp_updgrp.c \
bgpd/bgp_updgrp_adv.c \
@@ -166,6 +167,7 @@ noinst_HEADERS += \
bgpd/bgp_memory.h \
bgpd/bgp_mpath.h \
bgpd/bgp_mplsvpn.h \
+ bgpd/bgp_mplsvpn_snmp.h \
bgpd/bgp_network.h \
bgpd/bgp_nexthop.h \
bgpd/bgp_nht.h \
@@ -175,6 +177,7 @@ noinst_HEADERS += \
bgpd/bgp_rd.h \
bgpd/bgp_regex.h \
bgpd/bgp_route.h \
+ bgpd/bgp_script.h \
bgpd/bgp_table.h \
bgpd/bgp_updgrp.h \
bgpd/bgp_vpn.h \
@@ -216,7 +219,7 @@ bgpd_bgp_btoa_CFLAGS = $(AM_CFLAGS)
bgpd_bgpd_LDADD = bgpd/libbgp.a $(RFPLDADD) lib/libfrr.la $(LIBCAP) $(LIBM) $(UST_LIBS)
bgpd_bgp_btoa_LDADD = bgpd/libbgp.a $(RFPLDADD) lib/libfrr.la $(LIBCAP) $(LIBM) $(UST_LIBS)
-bgpd_bgpd_snmp_la_SOURCES = bgpd/bgp_snmp.c
+bgpd_bgpd_snmp_la_SOURCES = bgpd/bgp_snmp.c bgpd/bgp_mplsvpn_snmp.c
bgpd_bgpd_snmp_la_CFLAGS = $(WERROR) $(SNMP_CFLAGS) -std=gnu99
bgpd_bgpd_snmp_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
bgpd_bgpd_snmp_la_LIBADD = lib/libfrrsnmp.la
diff --git a/configure.ac b/configure.ac
index 495019ee14..f3d1f38986 100755
--- a/configure.ac
+++ b/configure.ac
@@ -7,7 +7,7 @@
##
AC_PREREQ([2.60])
-AC_INIT([frr], [7.6-dev], [https://github.com/frrouting/frr/issues])
+AC_INIT([frr], [7.7-dev], [https://github.com/frrouting/frr/issues])
PACKAGE_URL="https://frrouting.org/"
AC_SUBST([PACKAGE_URL])
PACKAGE_FULLNAME="FRRouting"
@@ -138,6 +138,12 @@ AC_ARG_WITH([moduledir], [AS_HELP_STRING([--with-moduledir=DIR], [module directo
])
AC_SUBST([moduledir], [$moduledir])
+AC_ARG_WITH([scriptdir], [AS_HELP_STRING([--with-scriptdir=DIR], [script directory (${sysconfdir}/scripts)])], [
+ scriptdir="$withval"
+], [
+ scriptdir="\${sysconfdir}/scripts"
+])
+AC_SUBST([scriptdir], [$scriptdir])
AC_ARG_WITH([yangmodelsdir], [AS_HELP_STRING([--with-yangmodelsdir=DIR], [yang models directory (${datarootdir}/yang)])], [
yangmodelsdir="$withval"
@@ -185,6 +191,11 @@ CXXFLAGS="$orig_cxxflags"
AC_PROG_CC_C99
dnl NB: see C11 below
+dnl Some special handling for ICC later on
+if test "$CC" = "icc"; then
+ cc_is_icc="yes"
+fi
+
PKG_PROG_PKG_CONFIG
dnl it's 2019, sed is sed.
@@ -246,7 +257,9 @@ AC_DEFUN([AC_LINK_IFELSE_FLAGS], [{
dnl ICC won't bail on unknown options without -diag-error 10006
dnl need to do this first so we get useful results for the other options
-AC_C_FLAG([-diag-error 10006])
+if test "$cc_is_icc" = "yes"; then
+ AC_C_FLAG([-diag-error 10006])
+fi
dnl AC_PROG_CC_C99 may change CC to include -std=gnu99 or something
ac_cc="$CC"
@@ -274,24 +287,22 @@ if test "$enable_clang_coverage" = "yes"; then
])
fi
+if test "$enable_scripting" = "yes"; then
+ AX_PROG_LUA([5.3])
+ AX_LUA_HEADERS
+ AX_LUA_LIBS([
+ AC_DEFINE([HAVE_SCRIPTING], [1], [Have support for scripting])
+ LIBS="$LIBS $LUA_LIB"
+ ])
+fi
+
if test "$enable_dev_build" = "yes"; then
AC_DEFINE([DEV_BUILD], [1], [Build for development])
if test "$orig_cflags" = ""; then
AC_C_FLAG([-g3])
AC_C_FLAG([-O0])
fi
- if test "$enable_lua" = "yes"; then
- AX_PROG_LUA([5.3])
- AX_LUA_HEADERS
- AX_LUA_LIBS([
- AC_DEFINE([HAVE_LUA], [1], [Have support for Lua interpreter])
- LIBS="$LIBS $LUA_LIB"
- ])
- fi
else
- if test "$enable_lua" = "yes"; then
- AC_MSG_ERROR([Lua is not meant to be built/used outside of development at this time])
- fi
if test "$orig_cflags" = ""; then
AC_C_FLAG([-g])
AC_C_FLAG([-O2])
@@ -331,7 +342,9 @@ AC_SUBST([CXX_COMPAT_CFLAGS])
dnl ICC emits a broken warning for const char *x = a ? "b" : "c";
dnl for some reason the string consts get 'promoted' to char *,
dnl triggering a const to non-const conversion warning.
-AC_C_FLAG([-diag-disable 3179])
+if test "$cc_is_icc" = "yes"; then
+ AC_C_FLAG([-diag-disable 3179])
+fi
if test "$enable_werror" = "yes" ; then
WERROR="-Werror"
@@ -407,6 +420,21 @@ else
])
fi
+AC_MSG_CHECKING([whether linker supports __start/stop_section symbols])
+AC_LINK_IFELSE([AC_LANG_PROGRAM([[
+#include <stdio.h>
+int __attribute__((section("secttest"))) var = 1;
+extern int __start_secttest, __stop_secttest;
+]], [[
+ void *a = &var, *b = &__start_secttest, *c = &__stop_secttest;
+ printf("%p %p %p\n", a, b, c);
+]])], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_SECTION_SYMS, 1, [have __start/stop_section symbols])
+], [
+ AC_MSG_RESULT(no)
+])
+
dnl ----------
dnl Essentials
dnl ----------
@@ -697,8 +725,8 @@ fi
AC_ARG_ENABLE([dev_build],
AS_HELP_STRING([--enable-dev-build], [build for development]))
-AC_ARG_ENABLE([lua],
- AS_HELP_STRING([--enable-lua], [Build Lua scripting]))
+AC_ARG_ENABLE([scripting],
+ AS_HELP_STRING([--enable-scripting], [Build with scripting support]))
AC_ARG_ENABLE([netlink-debug],
AS_HELP_STRING([--disable-netlink-debug], [pretty print netlink debug messages]))
@@ -2347,6 +2375,29 @@ if test "$frr_cv_mallinfo" = "yes"; then
AC_DEFINE([HAVE_MALLINFO], [1], [mallinfo])
fi
+AC_CACHE_CHECK([whether mallinfo2 is available], [frr_cv_mallinfo2], [
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([FRR_INCLUDES [
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+#ifdef HAVE_MALLOC_NP_H
+#include <malloc_np.h>
+#endif
+#ifdef HAVE_MALLOC_MALLOC_H
+#include <malloc/malloc.h>
+#endif
+]], [[
+struct mallinfo2 ac_x; ac_x = mallinfo2 ();
+]])], [
+ frr_cv_mallinfo2=yes
+ ], [
+ frr_cv_mallinfo2=no
+ ])
+])
+if test "$frr_cv_mallinfo2" = "yes"; then
+ AC_DEFINE([HAVE_MALLINFO2], [1], [mallinfo2])
+fi
+
AC_MSG_CHECKING([whether malloc_usable_size is available])
AC_LINK_IFELSE([AC_LANG_PROGRAM([FRR_INCLUDES [
#ifdef HAVE_MALLOC_H
@@ -2446,19 +2497,23 @@ CFG_SBIN="$sbindir"
CFG_STATE="$frr_statedir"
CFG_MODULE="$moduledir"
CFG_YANGMODELS="$yangmodelsdir"
+CFG_SCRIPT="$scriptdir"
for I in 1 2 3 4 5 6 7 8 9 10; do
eval CFG_SYSCONF="\"$CFG_SYSCONF\""
eval CFG_SBIN="\"$CFG_SBIN\""
eval CFG_STATE="\"$CFG_STATE\""
eval CFG_MODULE="\"$CFG_MODULE\""
eval CFG_YANGMODELS="\"$CFG_YANGMODELS\""
+ eval CFG_SCRIPT="\"$CFG_SCRIPT\""
done
AC_SUBST([CFG_SYSCONF])
AC_SUBST([CFG_SBIN])
AC_SUBST([CFG_STATE])
AC_SUBST([CFG_MODULE])
+AC_SUBST([CFG_SCRIPT])
AC_SUBST([CFG_YANGMODELS])
AC_DEFINE_UNQUOTED([MODULE_PATH], ["$CFG_MODULE"], [path to modules])
+AC_DEFINE_UNQUOTED([SCRIPT_PATH], ["$CFG_SCRIPT"], [path to scripts])
AC_DEFINE_UNQUOTED([YANG_MODELS_PATH], ["$CFG_YANGMODELS"], [path to YANG data models])
AC_DEFINE_UNQUOTED([WATCHFRR_SH_PATH], ["${CFG_SBIN%/}/watchfrr.sh"], [path to watchfrr.sh])
@@ -2540,6 +2595,8 @@ AC_CONFIG_FILES([tools/frr], [chmod +x tools/frr])
AC_CONFIG_FILES([tools/watchfrr.sh], [chmod +x tools/watchfrr.sh])
AC_CONFIG_FILES([tools/frrinit.sh], [chmod +x tools/frrinit.sh])
AC_CONFIG_FILES([tools/frrcommon.sh])
+AC_CONFIG_FILES([tools/frr.service])
+AC_CONFIG_FILES([tools/frr@.service])
AS_IF([test "$with_pkg_git_version" = "yes"], [
AC_CONFIG_COMMANDS([lib/gitversion.h], [
@@ -2582,6 +2639,7 @@ state file directory : ${frr_statedir}
config file directory : `eval echo \`echo ${sysconfdir}\``
example directory : `eval echo \`echo ${exampledir}\``
module directory : ${CFG_MODULE}
+script directory : ${CFG_SCRIPT}
user to run as : ${enable_user}
group to run as : ${enable_group}
group for vty sockets : ${enable_vty_group}
diff --git a/debian/control b/debian/control
index 4aaa9f21bf..b9e96b55d0 100644
--- a/debian/control
+++ b/debian/control
@@ -29,7 +29,8 @@ Build-Depends: bison,
python3-dev,
python3-pytest <!nocheck>,
python3-sphinx,
- texinfo (>= 4.7)
+ texinfo (>= 4.7),
+ liblua5.3-dev <pkg.frr.lua>
Standards-Version: 4.5.0.3
Homepage: https://www.frrouting.org/
Vcs-Browser: https://github.com/FRRouting/frr/tree/debian/master
diff --git a/debian/rules b/debian/rules
index 6cc03c378a..25ae04261d 100755
--- a/debian/rules
+++ b/debian/rules
@@ -29,6 +29,12 @@ else
CONF_SYSTEMD=--enable-systemd=no
endif
+ifeq ($(filter pkg.frr.lua,$(DEB_BUILD_PROFILES)),)
+ CONF_LUA=--disable-scripting
+else
+ CONF_LUA=--enable-scripting
+endif
+
export PYTHON=python3
%:
@@ -49,6 +55,7 @@ override_dh_auto_configure:
\
$(CONF_SYSTEMD) \
$(CONF_RPKI) \
+ $(CONF_LUA) \
--with-libpam \
--enable-doc \
--enable-doc-html \
diff --git a/doc/developer/building.rst b/doc/developer/building.rst
index a6cd545872..c687ba8dc8 100644
--- a/doc/developer/building.rst
+++ b/doc/developer/building.rst
@@ -29,3 +29,4 @@ Building FRR
building-frr-for-ubuntu2004
building-frr-for-archlinux
building-docker
+ cross-compiling
diff --git a/doc/developer/cross-compiling.rst b/doc/developer/cross-compiling.rst
new file mode 100644
index 0000000000..339e00c921
--- /dev/null
+++ b/doc/developer/cross-compiling.rst
@@ -0,0 +1,326 @@
+Cross-Compiling
+===============
+
+FRR is capable of being cross-compiled to a number of different architectures.
+With an adequate toolchain this process is fairly straightforward, though one
+must exercise caution to validate this toolchain's correctness before attempting
+to compile FRR or its dependencies; small oversights in the construction of the
+build tools may lead to problems which quickly become difficult to diagnose.
+
+Toolchain Preliminary
+---------------------
+
+The first step to cross-compiling any program is to identify the system which
+the program (FRR) will run on. From here on this will be called the "host"
+machine, following autotools' convention, while the machine building FRR will be
+called the "build" machine. The toolchain will of course be installed onto the
+build machine and be leveraged to build FRR for the host machine to run.
+
+.. note::
+
+ The build machine used while writing this guide was ``x86_64-pc-linux-gnu``
+ and the target machine was ``arm-linux-gnueabihf`` (a Raspberry Pi 3B+).
+ Replace this with your targeted tuple below if you plan on running the
+ commands from this guide:
+
+ .. code-block:: shell
+
+ export HOST_ARCH="arm-linux-gnueabihf"
+
+ For your given target, the build system's OS may have some support for
+ building cross compilers natively, or may even offer binary toolchains built
+ upstream for the target architecture. Check your package manager or OS
+ documentation before committing to building a toolchain from scratch.
+
+This guide will not detail *how* to build a cross-compiling toolchain but
+will instead assume one already exists and is installed on the build system.
+The methods for building the toolchain itself may differ between operating
+systems so consult the OS documentation for any particulars regarding
+cross-compilers. The OSDev wiki has a `pleasant tutorial`_ on cross-compiling in
+the context of operating system development which bootstraps from only the
+native GCC and binutils on the build machine. This may be useful if the build
+machine's OS does not offer existing tools to build a cross-compiler targeting
+the host.
+
+.. _pleasant tutorial: https://wiki.osdev.org/GCC_Cross-Compiler
+
+This guide will also not demonstrate how to build all of FRR's dependencies for the
+target architecture. Instead, general instructions for using a cross-compiling
+toolchain to compile packages using CMake, Autotools, and Makefiles are
+provided; these three cases apply to almost all FRR dependencies.
+
+.. _glibc mismatch:
+
+.. warning::
+
+ Ensure the versions and implementations of the C standard library (glibc or
+ what have you) match on the host and the build toolchain. ``ldd --version``
+ will help you here. Upgrade one or the other if they do not match.
+
+Testing the Toolchain
+---------------------
+
+Before any cross-compilation begins it would be prudent to test the new
+toolchain by writing, compiling and linking a simple program.
+
+.. code-block:: shell
+
+ # A small program
+ cat > nothing.c <<EOF
+ int main() { return 0; }
+ EOF
+
+ # Build and link with the cross-compiler
+ ${HOST_ARCH}-gcc -o nothing nothing.c
+
+ # Inspect the resulting binary, results may vary
+ file ./nothing
+
+ # nothing: ELF 32-bit LSB pie executable, ARM, EABI5 version 1 (SYSV),
+ # dynamically linked, interpreter /lib/ld-linux-armhf.so.3,
+ # for GNU/Linux 3.2.0, not stripped
+
+If this produced no errors then the installed toolchain is probably ready to
+start compiling the build dependencies and eventually FRR itself. There still
+may be lurking issues but fundamentally the toolchain can produce binaries and
+that's good enough to start working with it.
+
+.. warning::
+
+ If any errors occurred during the previous functional test please look back
+ and address them before moving on; this indicates your cross-compiling
+ toolchain is *not* in a position to build FRR or its dependencies. Even if
+ everything was fine, keep in mind that many errors from here on *may still
+ be related* to your toolchain (e.g. libstdc++.so or other components) and this
+ small test is not a guarantee of complete toolchain coherence.
+
+Cross-compiling Dependencies
+----------------------------
+
+When compiling FRR it is necessary to compile some of its dependencies alongside
+it on the build machine. This is so symbols from the shared libraries (which
+will be loaded at run-time on the host machine) can be linked to the FRR
+binaries at compile time; additionally, headers for these libraries are needed
+during the compile stage for a successful build.
+
+Sysroot Overview
+^^^^^^^^^^^^^^^^
+
+All build dependencies should be installed into a "root" directory on the build
+computer, hereafter called the "sysroot". This directory will be prefixed to
+paths while searching for requisite libraries and headers during the build
+process. Often this may be set via a ``--prefix`` flag when building the
+dependent packages, meaning a ``make install`` will copy compiled libraries into
+(e.g.) ``/usr/${HOST_ARCH}/usr``.
+
+If the toolchain was built on the build machine then there is likely already a
+sysroot where those tools and standard libraries were installed; it may be
+helpful to use that directory as the sysroot for this build as well.
+
+Basic Workflow
+^^^^^^^^^^^^^^
+
+Before compiling or building any dependencies, make note of which daemons are
+being targeted and which libraries will be needed. Not all dependencies are
+necessary if only building with a subset of the daemons.
+
+The following workflow will compile and install any libraries which can be built
+with Autotools. The resultant library will be installed into the sysroot
+``/usr/${HOST_ARCH}``.
+
+.. code-block:: shell
+
+ ./configure \
+ CC=${HOST_ARCH}-gcc \
+ CXX=${HOST_ARCH}-g++ \
+ --build=${HOST_ARCH} \
+ --prefix=/usr/${HOST_ARCH}
+ make
+ make install
+
+Some libraries like ``json-c`` and ``libyang`` are packaged with CMake and can
+be built and installed generally like:
+
+.. code-block:: shell
+
+ mkdir build
+ cd build
+ CC=${HOST_ARCH}-gcc \
+ CXX=${HOST_ARCH}-g++ \
+ cmake \
+ -DCMAKE_INSTALL_PREFIX=/usr/${HOST_ARCH} \
+ ..
+ make
+ make install
+
+For programs with only a Makefile (e.g. ``libcap``) the process may look still a
+little different:
+
+.. code-block:: shell
+
+ CC=${HOST_ARCH}-gcc make
+ make install DESTDIR=/usr/${HOST_ARCH}
+
+These three workflows should handle the bulk of building and installing the
+build-time dependencies for FRR. Verify that the installed files are being
+placed correctly into the sysroot and were actually built using the
+cross-compile toolchain, not by the native toolchain by accident.
+
+Dependency Notes
+^^^^^^^^^^^^^^^^
+
+There are a lot of things that can go wrong during a cross-compilation. Some of
+the more common errors and a few special considerations are collected below for
+reference.
+
+libyang
+"""""""
+
+``-DENABLE_LYD_PRIV=ON`` should be provided during the CMake step.
+
+Ensure also that the version of ``libyang`` being installed corresponds to the
+version required by the targeted FRR version.
+
+gRPC
+""""
+
+This piece is requisite only if the ``--enable-grpc`` flag will be passed
+later on to FRR. One may get burned when compiling gRPC if the ``protoc``
+version on the build machine differs from the version of ``protoc`` being linked
+to during a gRPC build. The error messages from this defect look like:
+
+.. code-block:: terminal
+
+ gens/src/proto/grpc/channelz/channelz.pb.h: In member function ‘void grpc::channelz::v1::ServerRef::set_name(const char*, size_t)’:
+ gens/src/proto/grpc/channelz/channelz.pb.h:9127:64: error: ‘EmptyDefault’ is not a member of ‘google::protobuf::internal::ArenaStringPtr’
+ 9127 | name_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, ::std::string(
+
+This happens because protocol buffer code generation uses ``protoc`` to create
+classes with different getters and setters corresponding to the protobuf data
+defined by the source tree's ``.proto`` files. Clearly the cross-compiled
+``protoc`` cannot be used for this code generation because that binary is built
+for a different CPU.
+
+The solution is to install matching versions of native and cross-compiled
+protocol buffers; this way the native binary will generate code and the
+cross-compiled library will be linked to by gRPC and these versions will not
+disagree.
+
+----
+
+The ``-latomic`` linker flag may also be necessary here if using ``libstdc++``
+since GCC's C++11 implementation makes library calls in certain cases for
+``<atomic>`` so ``-latomic`` cannot be assumed.
+
+Cross-compiling FRR Itself
+--------------------------
+
+With all the necessary libraries cross-compiled and installed into the sysroot,
+the last thing to actually build is FRR itself:
+
+.. code-block:: shell
+
+ # Clone and bootstrap the build
+ git clone 'https://github.com/FRRouting/frr.git'
+ # (e.g.) git checkout stable/7.5
+ ./bootstrap.sh
+
+ # Build clippy using the native toolchain
+ mkdir build-clippy
+ cd build-clippy
+ ../configure --enable-clippy-only
+ make clippy-only
+ cd ..
+
+ # Next, configure FRR and use the clippy we just built
+ ./configure \
+ CC=${HOST_ARCH}-gcc \
+ CXX=${HOST_ARCH}-g++ \
+ --host=${HOST_ARCH} \
+ --with-sysroot=/usr/${HOST_ARCH} \
+ --with-clippy=./build-clippy/lib/clippy \
+ --sysconfdir=/etc/frr \
+ --sbindir="\${prefix}/lib/frr" \
+ --localstatedir=/var/run/frr \
+ --prefix=/usr \
+ --enable-user=frr \
+ --enable-group=frr \
+ --enable-vty-group=frrvty \
+ --disable-doc \
+ --enable-grpc
+
+ # Send it
+ make
+
+Installation to Host Machine
+----------------------------
+
+If no errors were observed during the previous steps it is safe to ``make
+install`` FRR into its own directory.
+
+.. code-block:: shell
+
+ # Install FRR into its own "sysroot"
+ make install DESTDIR=/some/path/to/sysroot
+
+After running the above command, FRR binaries, modules and example configuration
+files will be installed into some path on the build machine. The directory
+will have folders like ``/usr`` and ``/etc``; this "root" should now be copied
+to the host and installed on top of the root directory there.
+
+.. code-block:: shell
+
+ # Tar this sysroot (preserving permissions)
+ tar -C /some/path/to/sysroot -cpvf frr-${HOST_ARCH}.tar .
+
+ # Transfer tar file to host machine
+ scp frr-${HOST_ARCH}.tar me@host-machine:
+
+ # Overlay the tarred sysroot on top of the host machine's root
+ ssh me@host-machine <<-EOF
+ # May need to elevate permissions here
+ tar -C / -xpvf frr-${HOST_ARCH}.tar .
+ EOF
+
+Now FRR should be installed just as if ``make install`` had been run on the host
+machine. Create configuration files and assign permissions as needed. Lastly,
+ensure the correct users and groups exist for FRR on the host machine.
+
+Troubleshooting
+---------------
+
+Even when every precaution has been taken some things may still go wrong! This
+section details some common runtime problems.
+
+Mismatched Libraries
+^^^^^^^^^^^^^^^^^^^^
+
+If you see something like this after installing on the host:
+
+.. code-block:: console
+
+ /usr/lib/frr/zebra: error while loading shared libraries: libyang.so.1: cannot open shared object file: No such file or directory
+
+... at least one of FRR's dependencies which was linked to the binary earlier is
+not available on the host OS. Even if it has been installed the host
+repository's version may lag what is needed for more recent FRR builds (this is
+likely to happen with YANG at the moment).
+
+If the matching library is not available from the host OS package manager it may
+be possible to compile them using the same toolchain used to compile FRR. The
+library may have already been built earlier when compiling FRR on the build
+machine, in which case it may be as simple as following the same workflow laid
+out during the `Installation to Host Machine`_.
+
+Mismatched Glibc Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The version and implementation of the C standard library must match on both the
+host and build toolchain. The error corresponding to this misconfiguration will
+look like:
+
+.. code-block:: console
+
+ /usr/lib/frr/zebra: /lib/${HOST_ARCH}/libc.so.6: version `GLIBC_2.32' not found (required by /usr/lib/libfrr.so.0)
+
+See the earlier warning about preventing a `glibc mismatch`_.
diff --git a/doc/developer/library.rst b/doc/developer/library.rst
index 3d5c6a2a15..2e36c253ea 100644
--- a/doc/developer/library.rst
+++ b/doc/developer/library.rst
@@ -11,10 +11,11 @@ Library Facilities (libfrr)
rcu
lists
logging
+ xrefs
locking
hooks
cli
modules
- lua
+ scripting
diff --git a/doc/developer/lua.rst b/doc/developer/lua.rst
deleted file mode 100644
index 3315c31ad7..0000000000
--- a/doc/developer/lua.rst
+++ /dev/null
@@ -1,65 +0,0 @@
-.. _lua:
-
-Lua
-===
-
-Lua is currently experimental within FRR and has very limited
-support. If you would like to compile FRR with Lua you must
-follow these steps:
-
-1. Installation of Relevant Libraries
-
- .. code-block:: shell
-
- apt-get install lua5.3 liblua5-3 liblua5.3-dev
-
- These are the Debian libraries that are needed. There should
- be equivalent RPM's that can be found
-
-2. Compilation
-
- Configure needs these options
-
- .. code-block:: shell
-
- ./configure --enable-dev-build --enable-lua <all other interesting options>
-
- Typically you just include the two new enable lines to build with it.
-
-3. Using Lua
-
- * Copy tools/lua.scr into /etc/frr
-
- * Create a route-map match command
-
- .. code-block:: console
-
- !
- router bgp 55
- neighbor 10.50.11.116 remote-as external
- address-family ipv4 unicast
- neighbor 10.50.11.116 route-map TEST in
- exit-address-family
- !
- route-map TEST permit 10
- match command mooey
- !
-
- * In the lua.scr file make sure that you have a function named 'mooey'
-
- .. code-block:: console
-
- function mooey ()
- zlog_debug(string.format("afi: %d: %s %d ifdx: %d aspath: %s localpref: %d",
- prefix.family, prefix.route, nexthop.metric,
- nexthop.ifindex, nexthop.aspath, nexthop.localpref))
-
- nexthop.metric = 33
- nexthop.localpref = 13
- return 3
- end
-
-4. General Comments
-
- Please be aware that this is extremely experimental and needs a ton of work
- to get this up into a state that is usable.
diff --git a/doc/developer/ospf-sr.rst b/doc/developer/ospf-sr.rst
index efe9b1b12f..263db9dfb9 100644
--- a/doc/developer/ospf-sr.rst
+++ b/doc/developer/ospf-sr.rst
@@ -17,6 +17,7 @@ Supported Features
* Automatic provisioning of MPLS table
* Equal Cost Multi-Path (ECMP)
* Static route configuration with label stack up to 32 labels
+* TI-LFA (for P2P interfaces only)
Interoperability
----------------
@@ -182,6 +183,71 @@ called. Once check the validity of labels, they are send to ZEBRA layer through
command for deletion. This is completed by a new labelled route through
`ZEBRA_ROUTE_ADD` command, respectively `ZEBRA_ROUTE_DELETE` command.
+TI-LFA
+^^^^^^
+
+Experimental support for Topology Independent LFA (Loop-Free Alternate), see
+for example 'draft-bashandy-rtgwg-segment-routing-ti-lfa-05'. The related
+files are `ospf_ti_lfa.c/h`.
+
+The current implementation is rather naive and does not support the advanced
+optimizations suggested in e.g. RFC7490 or RFC8102. It focuses on providing
+the essential infrastructure which can also later be used to enhance the
+algorithmic aspects.
+
+Supported features:
+
+* Link and node protection
+* Intra-area support
+* Proper use of Prefix- and Adjacency-SIDs in label stacks
+* Asymmetric weights (using reverse SPF)
+* Non-adjacent P/Q spaces
+* Protection of Prefix-SIDs
+
+If configured for every SPF run the routing table is enriched with additional
+backup paths for every prefix. The corresponding Prefix-SIDs are updated with
+backup paths too within the OSPF SR update task.
+
+Informal High-Level Algorithm Description:
+
+::
+
+ p_spaces = empty_list()
+
+ for every protected_resource (link or node):
+ p_space = generate_p_space(protected_resource)
+ p_space.q_spaces = empty_list()
+
+ for every destination that is affected by the protected_resource:
+ q_space = generate_q_space(destination)
+
+ # The label stack is stored in q_space
+ generate_label_stack(p_space, q_space)
+
+ # The p_space collects all its q_spaces
+ p_spaces.q_spaces.add(q_space)
+
+ p_spaces.add(p_space)
+
+ adjust_routing_table(p_spaces)
+
+Possible Performance Improvements:
+
+* Improve overall datastructures, get away from linked lists for vertices
+* Don't calculate a Q space for every destination, but for a minimum set of
+ backup paths that cover all destinations in the post-convergence SPF. The
+ thinking here is that once a backup path is known that it is also a backup
+ path for all nodes on the path themselves. This can be done by using the
+ leafs of a trimmed minimum spanning tree generated out of the post-
+ convergence SPF tree for that particular P space.
+* For an alternative (maybe better) optimization look at
+ https://tools.ietf.org/html/rfc7490#section-5.2.1.3 which describes using
+ the Q space of the node which is affected by e.g. a link failure. Note that
+ this optimization is topology dependent.
+
+It is highly recommended to read e.g. `Segment Routing I/II` by Filsfils to
+understand the basics of Ti-LFA.
+
Configuration
-------------
diff --git a/doc/developer/scripting.rst b/doc/developer/scripting.rst
new file mode 100644
index 0000000000..b0413619ab
--- /dev/null
+++ b/doc/developer/scripting.rst
@@ -0,0 +1,433 @@
+.. _scripting:
+
+Scripting
+=========
+
+.. seealso:: User docs for scripting
+
+Overview
+--------
+
+FRR has the ability to call Lua scripts to perform calculations, make
+decisions, or otherwise extend builtin behavior with arbitrary user code. This
+is implemented using the standard Lua C bindings. The supported version of Lua
+is 5.3.
+
+C objects may be passed into Lua and Lua objects may be retrieved by C code via
+a marshalling system. In this way, arbitrary data from FRR may be passed to
+scripts. It is possible to pass C functions as well.
+
+The Lua environment is isolated from the C environment; user scripts cannot
+access FRR's address space unless explicitly allowed by FRR.
+
+For general information on how Lua is used to extend C, refer to Part IV of
+"Programming in Lua".
+
+https://www.lua.org/pil/contents.html#24
+
+
+Design
+------
+
+Why Lua
+^^^^^^^
+
+Lua is designed to be embedded in C applications. It is very small; the
+standard library is 220K. It is relatively fast. It has a simple, minimal
+syntax that is relatively easy to learn and can be understood by someone with
+little to no programming experience. Moreover it is widely used to add
+scripting capabilities to applications. In short it is designed for this task.
+
+Reasons against supporting multiple scripting languages:
+
+- Each language would require different FFI methods, and specifically
+ different object encoders; a lot of code
+- Languages have different capabilities that would have to be brought to
+ parity with each other; a lot of work
+- Languages have vastly different performance characteristics; this would
+ create a lot of basically unfixable issues, and result in a single de facto
+ standard scripting language (the fastest)
+- Each language would need a dedicated maintainer for the above reasons;
+ this is pragmatically difficult
+- Supporting multiple languages fractures the community and limits the audience
+ with which a given script can be shared
+
+General
+^^^^^^^
+
+FRR's concept of a script is somewhat abstracted away from the fact that it is
+Lua underneath. A script has two things:
+
+- name
+- state
+
+In code:
+
+.. code-block:: c
+
+ struct frrscript {
+ /* Script name */
+ char *name;
+
+ /* Lua state */
+ struct lua_State *L;
+ };
+
+
+``name`` is simply a string. Everything else is in ``state``, which is itself a
+Lua library object (``lua_State``). This is an opaque struct that is
+manipulated using ``lua_*`` functions. The basic ones are imported from
+``lua.h`` and the rest are implemented within FRR to fill our use cases. The
+thing to remember is that all operations beyond the initial loading of the
+script take place on this opaque state object.
+
+There are four basic actions that can be done on a script:
+
+- load
+- execute
+- query state
+- unload
+
+They are typically done in this order.
+
+
+Loading
+^^^^^^^
+
+A snippet of Lua code is referred to as a "chunk". These are simply text. FRR
+presently assumes chunks are located in individual files specific to one task.
+These files are stored in the scripts directory and must end in ``.lua``.
+
+A script object is created by loading a script. This is done with
+``frrscript_load()``. This function takes the name of the script and an
+optional callback function. The string ".lua" is appended to the script name,
+and the resultant filename is looked for in the scripts directory.
+
+For example, to load ``/etc/frr/scripts/bingus.lua``:
+
+.. code-block:: c
+
+ struct frrscript *fs = frrscript_load("bingus", NULL);
+
+During loading the script is validated for syntax and its initial environment
+is set up. By default this does not include the Lua standard library; there are
+security issues to consider, though for practical purposes untrusted users
+should not be able to write to the scripts directory anyway. If desired the Lua
+standard library may be added to the script environment using
+``luaL_openlibs(fs->L)`` after loading the script. Further information on
+setting up the script environment is in the Lua manual.
+
+
+Executing
+^^^^^^^^^
+
+After loading, scripts may be executed. A script may take input in the form of
+variable bindings set in its environment prior to being run, and may provide
+results by setting the value of variables. Arbitrary C values may be
+transferred into the script environment, including functions.
+
+A typical execution call looks something like this:
+
+.. code-block:: c
+
+ struct frrscript *fs = frrscript_load(...);
+
+ int status_ok = 0, status_fail = 1;
+ struct prefix p = ...;
+
+ struct frrscript_env env[] = {
+ {"integer", "STATUS_FAIL", &status_fail},
+ {"integer", "STATUS_OK", &status_ok},
+ {"prefix", "myprefix", &p},
+ {}};
+
+ int result = frrscript_call(fs, env);
+
+
+To execute a loaded script, we need to define the inputs. These inputs are
+passed by binding values to variable names that will be accessible within the
+Lua environment. Basically, all communication with the script takes place via
+global variables within the script, and to provide inputs we predefine globals
+before the script runs. This is done by passing ``frrscript_call()`` an array
+of ``struct frrscript_env``. Each struct has three fields. The first identifies
+the type of the value being passed; more on this later. The second defines the
+name of the global variable within the script environment to bind the third
+argument (the value) to.
+
+The script is then executed and returns a general status code. In the success
+case this will be 0, otherwise it will be nonzero. The script itself does not
+determine this code, it is provided by the Lua interpreter.
+
+
+Querying State
+^^^^^^^^^^^^^^
+
+When a chunk is executed, its state at exit is preserved and can be inspected.
+
+After running a script, results may be retrieved by querying the script's
+state. Again this is done by retrieving the values of global variables, which
+are known to the script author to be "output" variables.
+
+A result is retrieved like so:
+
+.. code-block:: c
+
+ struct frrscript_env myresult = {"string", "myresult"};
+
+ char *myresult = frrscript_get_result(fs, &myresult);
+
+ ... do something ...
+
+ XFREE(MTYPE_TMP, myresult);
+
+
+As with arguments, results are retrieved by providing a ``struct
+frrscript_env`` specifying a type and a global name. No value is necessary, nor
+is it modified by ``frrscript_get_result()``. That function simply extracts the
+requested value from the script state and returns it.
+
+In most cases the returned value will be allocated with ``MTYPE_TMP`` and will
+need to be freed after use.
+
+
+Unloading
+^^^^^^^^^
+
+To destroy a script and its associated state:
+
+.. code-block:: c
+
+ frrscript_unload(fs);
+
+Values returned by ``frrscript_get_result`` are still valid after the script
+they were retrieved from is unloaded.
+
+Note that you must unload and then load the script if you want to reset its
+state, for example to run it again with different inputs. Otherwise the state
+from the previous run carries over into subsequent runs.
+
+
+.. _marshalling:
+
+Marshalling
+^^^^^^^^^^^
+
+Earlier sections glossed over the meaning of the type name field in ``struct
+frrscript_env`` and how data is passed between C and Lua. Lua, as a dynamically
+typed, garbage collected language, cannot directly use C values without some
+kind of marshalling / unmarshalling system to translate types between the two
+runtimes.
+
+Lua communicates with C code using a stack. C code wishing to provide data to
+Lua scripts must provide a function that marshalls the C data into a Lua
+representation and pushes it on the stack. C code wishing to retrieve data from
+Lua must provide a corresponding unmarshalling function that retrieves a Lua
+value from the stack and converts it to the corresponding C type. These two
+functions, together with a chosen name of the type they operate on, are
+referred to as ``codecs`` in FRR.
+
+A codec is defined as:
+
+.. code-block:: c
+
+ typedef void (*encoder_func)(lua_State *, const void *);
+ typedef void *(*decoder_func)(lua_State *, int);
+
+ struct frrscript_codec {
+ const char *typename;
+ encoder_func encoder;
+ decoder_func decoder;
+ };
+
+A typename string and two function pointers.
+
+``typename`` can be anything you want. For example, for the combined types of
+``struct prefix`` and its equivalent in Lua I have chosen the name ``prefix``.
+There is no restriction on naming here, it is just a human name used as a key
+and specified when passing and retrieving values.
+
+``encoder`` is a function that takes a ``lua_State *`` and a C type and pushes
+onto the Lua stack a value representing the C type. For C structs, the usual
+case, this will typically be a Lua table (tables are the only datastructure Lua
+has). For example, here is the encoder function for ``struct prefix``:
+
+
+.. code-block:: c
+
+ void lua_pushprefix(lua_State *L, const struct prefix *prefix)
+ {
+ char buffer[PREFIX_STRLEN];
+
+ zlog_debug("frrlua: pushing prefix table");
+
+ lua_newtable(L);
+ lua_pushstring(L, prefix2str(prefix, buffer, PREFIX_STRLEN));
+ lua_setfield(L, -2, "network");
+ lua_pushinteger(L, prefix->prefixlen);
+ lua_setfield(L, -2, "length");
+ lua_pushinteger(L, prefix->family);
+ lua_setfield(L, -2, "family");
+ }
+
+This function pushes a single value onto the Lua stack. It is a table whose equivalent in Lua is:
+
+.. code-block::
+
 { ["network"] = "1.2.3.4/24", ["length"] = 24, ["family"] = 2 }
+
+
+``decoder`` does the reverse; it takes a ``lua_State *`` and an index into the
+stack, and unmarshalls a Lua value there into the corresponding C type. Again
+for ``struct prefix``:
+
+
+.. code-block:: c
+
+ void *lua_toprefix(lua_State *L, int idx)
+ {
+ struct prefix *p = XCALLOC(MTYPE_TMP, sizeof(struct prefix));
+
+ lua_getfield(L, idx, "network");
+ str2prefix(lua_tostring(L, -1), p);
+ lua_pop(L, 1);
+
+ return p;
+ }
+
+By convention these functions should be called ``lua_to*``, as this is the
+naming convention used by the Lua C library for the basic types e.g.
+``lua_tointeger`` and ``lua_tostring``.
+
+The returned data must always be copied off the stack and the copy must be
+allocated with ``MTYPE_TMP``. This way it is possible to unload the script
+(destroy the state) without invalidating any references to values stored in it.
+
+To register a new type with its corresponding encoding functions:
+
+.. code-block:: c
+
+ struct frrscript_codec frrscript_codecs_lib[] = {
+ {.typename = "prefix",
+ .encoder = (encoder_func)lua_pushprefix,
+ .decoder = lua_toprefix},
+ {.typename = "sockunion",
+ .encoder = (encoder_func)lua_pushsockunion,
+ .decoder = lua_tosockunion},
+ ...
+ {}};
+
+ frrscript_register_type_codecs(frrscript_codecs_lib);
+
+From this point on the type names are available to be used when calling any
+script and getting its results.
+
+.. note::
+
+ Marshalled types are not restricted to simple values like integers, strings
+ and tables. It is possible to marshall a type such that the resultant object
+ in Lua is an actual object-oriented object, complete with methods that call
+ back into defined C functions. See the Lua manual for how to do this; for a
+ code example, look at how zlog is exported into the script environment.
+
+
+Script Environment
+------------------
+
+Logging
+^^^^^^^
+
+For convenience, script environments are populated by default with a ``log``
+object which contains methods corresponding to each of the ``zlog`` levels:
+
+.. code-block:: lua
+
+ log.info("info")
+ log.warn("warn")
+ log.error("error")
+ log.notice("notice")
+ log.debug("debug")
+
+The log messages will show up in the daemon's log output.
+
+
+Examples
+--------
+
+For a complete code example involving passing custom types, retrieving results,
+and doing complex calculations in Lua, look at the implementation of the
+``match script SCRIPT`` command for BGP routemaps. This example calls into a
+script with a route prefix and attributes received from a peer and expects the
+script to return a match / no match / match and update result.
+
+An example script to use with this follows. This script matches, does not match
+or updates a route depending on how many BGP UPDATE messages the peer has
+received when the script is called, simply as a demonstration of what can be
+accomplished with scripting.
+
+.. code-block:: lua
+
+
+ -- Example route map matching
+ -- author: qlyoung
+ --
+ -- The following variables are available to us:
+ -- log
+ -- logging library, with the usual functions
+ -- prefix
+ -- the route under consideration
+ -- attributes
+ -- the route's attributes
+ -- peer
+ -- the peer which received this route
+ -- RM_FAILURE
+ -- status code in case of failure
+ -- RM_NOMATCH
+ -- status code for no match
+ -- RM_MATCH
+ -- status code for match
+ -- RM_MATCH_AND_CHANGE
+ -- status code for match-and-set
+ --
+ -- We need to set the following out values:
+ -- action
+ -- Set to the appropriate status code to indicate what we did
+ -- attributes
+ -- Setting fields on here will propagate them back up to the caller if
+ -- 'action' is set to RM_MATCH_AND_CHANGE.
+
+
+ log.info("Evaluating route " .. prefix.network .. " from peer " .. peer.remote_id.string)
+
+ function on_match (prefix, attrs)
+ log.info("Match")
+ action = RM_MATCH
+ end
+
+ function on_nomatch (prefix, attrs)
+ log.info("No match")
+ action = RM_NOMATCH
+ end
+
+ function on_match_and_change (prefix, attrs)
+ action = RM_MATCH_AND_CHANGE
+ log.info("Match and change")
+ attrs["metric"] = attrs["metric"] + 7
+ end
+
+ special_routes = {
+ ["172.16.10.4/24"] = on_match,
+ ["172.16.13.1/8"] = on_nomatch,
+ ["192.168.0.24/8"] = on_match_and_change,
+ }
+
+
+ if special_routes[prefix.network] then
+ special_routes[prefix.network](prefix, attributes)
+ elseif peer.stats.update_in % 3 == 0 then
+ on_match(prefix, attributes)
+ elseif peer.stats.update_in % 2 == 0 then
+ on_nomatch(prefix, attributes)
+ else
+ on_match_and_change(prefix, attributes)
+ end
+
diff --git a/doc/developer/subdir.am b/doc/developer/subdir.am
index 0129be6bf1..f7e4486ef0 100644
--- a/doc/developer/subdir.am
+++ b/doc/developer/subdir.am
@@ -27,6 +27,7 @@ dev_RSTFILES = \
doc/developer/building.rst \
doc/developer/cli.rst \
doc/developer/conf.py \
+ doc/developer/cross-compiling.rst \
doc/developer/frr-release-procedure.rst \
doc/developer/grpc.rst \
doc/developer/hooks.rst \
@@ -37,7 +38,6 @@ dev_RSTFILES = \
doc/developer/lists.rst \
doc/developer/locking.rst \
doc/developer/logging.rst \
- doc/developer/lua.rst \
doc/developer/memtypes.rst \
doc/developer/modules.rst \
doc/developer/next-hop-tracking.rst \
@@ -52,12 +52,14 @@ dev_RSTFILES = \
doc/developer/path-internals.rst \
doc/developer/path.rst \
doc/developer/rcu.rst \
+ doc/developer/scripting.rst \
doc/developer/static-linking.rst \
doc/developer/tracing.rst \
doc/developer/testing.rst \
doc/developer/topotests-snippets.rst \
doc/developer/topotests.rst \
doc/developer/workflow.rst \
+ doc/developer/xrefs.rst \
doc/developer/zebra.rst \
# end
diff --git a/doc/developer/topotests-markers.rst b/doc/developer/topotests-markers.rst
new file mode 100644
index 0000000000..9f92412595
--- /dev/null
+++ b/doc/developer/topotests-markers.rst
@@ -0,0 +1,114 @@
+.. _topotests-markers:
+
+Markers
+--------
+
+To allow for automated selective testing on large scale continuous integration
+systems, all tests must be marked with at least one of the following markers:
+
+* babeld
+* bfdd
+* bgpd
+* eigrpd
+* isisd
+* ldpd
+* nhrpd
+* ospf6d
+* ospfd
+* pathd
+* pbrd
+* pimd
+* ripd
+* ripngd
+* sharpd
+* staticd
+* vrrpd
+
+The markers correspond to the daemon subdirectories in FRR's source code and have
+to be added to tests on a module level depending on which daemons are used
+during the test.
+
+The goal is to have continuous integration systems scan code submissions, detect
+changes to files in a daemon's subdirectory and select only tests using that
+daemon to run, to shorten developers' waiting times for test results and save
+test infrastructure resources.
+
+Newly written modules and code changes to tests which do not contain any
+markers, or contain incorrect ones, will be rejected by reviewers.
+
+
+Registering markers
+^^^^^^^^^^^^^^^^^^^
+The registration of new markers takes place in the file
+``tests/topotests/pytest.ini``:
+
+.. code:: python3
+
+ # tests/topotests/pytest.ini
+ [pytest]
+ ...
+ markers =
+ babeld: Tests that run against BABELD
+ bfdd: Tests that run against BFDD
+ ...
+ vrrpd: Tests that run against VRRPD
+
+
+Adding markers to tests
+^^^^^^^^^^^^^^^^^^^^^^^
+Markers are added to a test by placing a global variable in the test module.
+
+Adding a single marker:
+
+.. code:: python3
+
+ import pytest
+ ...
+
+ # add after imports, before defining classes or functions:
+ pytestmark = pytest.mark.bfdd
+
+ ...
+
+ def test_using_bfdd():
+
+
+Adding multiple markers:
+
+.. code:: python3
+
+ import pytest
+ ...
+
+ # add after imports, before defining classes or functions:
+ pytestmark = [
+ pytest.mark.bgpd,
+ pytest.mark.ospfd,
+ pytest.mark.ospf6d
+ ]
+
+ ...
+
+ def test_using_bgpd_ospfd_ospf6d():
+
+
+Selecting marked modules for testing
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Selecting by a single marker:
+
+.. code:: bash
+
+ pytest -v -m isisd
+
+Selecting by multiple markers:
+
+.. code:: bash
+
+ pytest -v -m "isisd or ldpd or nhrpd"
+
+
+Further Information
+^^^^^^^^^^^^^^^^^^^
+The `online pytest documentation <https://docs.pytest.org/en/stable/example/markers.html>`_
+provides further information and usage examples for pytest markers.
+
diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst
index 5486fd826d..93d81548b2 100644
--- a/doc/developer/topotests.rst
+++ b/doc/developer/topotests.rst
@@ -21,8 +21,10 @@ Installing Mininet Infrastructure
apt-get install mininet
apt-get install python-pip
apt-get install iproute
+ apt-get install iperf
pip install ipaddr
pip install "pytest<5"
+ pip install "scapy>=2.4.2"
pip install exabgp==3.4.17 (Newer 4.0 version of exabgp is not yet
supported)
useradd -d /var/run/exabgp/ -s /bin/false exabgp
@@ -49,6 +51,28 @@ Next, update security limits by changing :file:`/etc/security/limits.conf` to::
Reboot for options to take effect.
+SNMP Utilities Installation
+"""""""""""""""""""""""""""
+
+To run SNMP tests you need to install SNMP utilities and MIBs. Unfortunately
+there are some errors in the upstream MIBs which need to be patched up. The
+following steps will get you there on Ubuntu 20.04.
+
+.. code:: shell
+
+ apt install snmpd snmp
+ apt install snmp-mibs-downloader
+ download-mibs
+ wget http://www.iana.org/assignments/ianaippmmetricsregistry-mib/ianaippmmetricsregistry-mib -O /usr/share/snmp/mibs/iana/IANA-IPPM-METRICS-REGISTRY-MIB
+ wget http://pastebin.com/raw.php?i=p3QyuXzZ -O /usr/share/snmp/mibs/ietf/SNMPv2-PDU
+ wget http://pastebin.com/raw.php?i=gG7j8nyk -O /usr/share/snmp/mibs/ietf/IPATM-IPMC-MIB
+ edit /etc/snmp/snmp.conf to look like this
+ # As the snmp packages come without MIB files due to license reasons, loading
+ # of MIBs is disabled by default. If you added the MIBs you can reenable
+ # loading them by commenting out the following line.
+ mibs +ALL
+
+
FRR Installation
^^^^^^^^^^^^^^^^
@@ -84,6 +108,7 @@ If you prefer to manually build FRR, then use the following suggested config:
--enable-user=frr \
--enable-group=frr \
--enable-vty-group=frrvty \
+ --enable-snmp=agentx \
--with-pkg-extra-version=-my-manual-build
And create ``frr`` user and ``frrvty`` group as follows:
@@ -769,6 +794,8 @@ Requirements:
conforms with this, run it without the :option:`-s` parameter.
- Use `black <https://github.com/psf/black>`_ code formatter before creating
a pull request. This ensures we have a unified code style.
+- Mark test modules with pytest markers depending on the daemons used during the
+ tests (see Markers)
Tips:
@@ -927,6 +954,8 @@ does what you need. If nothing is similar, then you may create a new topology,
preferably, using the newest template
(:file:`tests/topotests/example-test/test_template.py`).
+.. include:: topotests-markers.rst
+
.. include:: topotests-snippets.rst
License
diff --git a/doc/developer/tracing.rst b/doc/developer/tracing.rst
index d54f6c7aaa..c194ae1270 100644
--- a/doc/developer/tracing.rst
+++ b/doc/developer/tracing.rst
@@ -308,6 +308,31 @@ Limitations
Tracers do not like ``fork()`` or ``dlopen()``. LTTng has some workarounds for
this involving interceptor libraries using ``LD_PRELOAD``.
+If you're running FRR in a typical daemonizing way (``-d`` to the daemons)
+you'll need to run the daemons like so:
+
+.. code-block:: shell
+
+ LD_PRELOAD=liblttng-ust-fork.so <daemon>
+
+
+If you're using systemd, you can accomplish this for all daemons by
+modifying ``frr.service`` like so:
+
+.. code-block:: diff
+
+ --- a/frr.service
+ +++ b/frr.service
+ @@ -7,6 +7,7 @@ Before=network.target
+ OnFailure=heartbeat-failed@%n.service
+
+ [Service]
+ +Environment="LD_PRELOAD=liblttng-ust-fork.so"
+ Nice=-5
+ Type=forking
+ NotifyAccess=all
+
+
USDT tracepoints are relatively high overhead and probably shouldn't be used
for "flight recorder" functionality, i.e. enabling and passively recording all
events for monitoring purposes. It's generally okay to use LTTng like this,
diff --git a/doc/developer/xrefs.rst b/doc/developer/xrefs.rst
new file mode 100644
index 0000000000..6a0794d41b
--- /dev/null
+++ b/doc/developer/xrefs.rst
@@ -0,0 +1,170 @@
+.. _xrefs:
+
+Introspection (xrefs)
+=====================
+
+The FRR library provides an introspection facility called "xrefs." The intent
+is to provide structured access to annotated entities in the compiled binary,
+such as log messages and thread scheduling calls.
+
+Enabling and use
+----------------
+
+Support for emitting an xref is included in the macros for the specific
+entities, e.g. :c:func:`zlog_info` contains the relevant statements. The only
+requirement for the system to work is a GNU compatible linker that supports
+section start/end symbols. (The only known linker on any system FRR supports
+that does not do this is the Solaris linker.)
+
+To verify xrefs have been included in a binary or dynamic library, run
+``readelf -n binary``. For individual object files, it's
+``readelf -S object.o | grep xref_array`` instead.
+
+An extraction tool will be added in a future commit.
+
+Structure and contents
+----------------------
+
+As a slight improvement to security and fault detection, xrefs are divided into
+a ``const struct xref *`` and an optional ``struct xrefdata *``. The required
+const part contains:
+
+.. c:member:: enum xref_type xref.type
+
+ Identifies what kind of object the xref points to.
+
+.. c:member:: int line
+.. c:member:: const char *xref.file
+.. c:member:: const char *xref.func
+
+ Source code location of the xref. ``func`` will be ``<global>`` for
+ xrefs outside of a function.
+
+.. c:member:: struct xrefdata *xref.xrefdata
+
+ The optional writable part of the xref. NULL if no non-const part exists.
+
+The optional non-const part has:
+
+.. c:member:: const struct xref *xrefdata.xref
+
+ Pointer back to the constant part. Since circular pointers are close to
+ impossible to emit from inside a function body's static variables, this
+ is initialized at startup.
+
+.. c:member:: char xrefdata.uid[16]
+
+ Unique identifier, see below.
+
+.. c:member:: const char *xrefdata.hashstr
+.. c:member:: uint32_t xrefdata.hashu32[2]
+
+ Input to unique identifier calculation. These should encompass all
+ details needed to make an xref unique. If more than one string should
+ be considered, use string concatenation for the initializer.
+
+Both structures can be extended by embedding them in a larger type-specific
+struct, e.g. ``struct xref_logmsg *``.
+
+Unique identifiers
+------------------
+
+All xrefs that have a writable ``struct xrefdata *`` part are assigned a
+unique identifier, which is formed as base32 (crockford) SHA256 on:
+
+- the source filename
+- the ``hashstr`` field
+- the ``hashu32`` fields
+
+.. note::
+
+ Function names and line numbers are intentionally not included to allow
+ moving items within a file without affecting the identifier.
+
+For running executables, this hash is calculated once at startup. When
+directly reading from an ELF file with external tooling, the value must be
+calculated when necessary.
+
+The identifiers have the form ``AXXXX-XXXXX`` where ``X`` is
+``0-9, A-Z except I,L,O,U`` and ``A`` is ``G-Z except I,L,O,U`` (i.e. the
+identifiers always start with a letter.) When reading identifiers from user
+input, ``I`` and ``L`` should be replaced with ``1`` and ``O`` should be
+replaced with ``0``. There are 49 bits of entropy in this identifier.
+
+Underlying machinery
+--------------------
+
+Xrefs are nothing other than global variables with some extra glue to make
+them possible to find from the outside by looking at the binary. The first
+non-obvious part is that they can occur inside of functions, since they're
+defined as ``static``. They don't have a visible name -- they don't need one.
+
+To make finding these variables possible, another global variable, a pointer
+to the first one, is created in the same way. However, it is put in a special
+ELF section through ``__attribute__((section("xref_array")))``. This is the
+section you can see with readelf.
+
+Finally, on the level of a whole executable or library, the linker will stuff
+the individual pointers consecutive to each other since they're in the same
+section — hence the array. Start and end of this array is given by the
+linker-autogenerated ``__start_xref_array`` and ``__stop_xref_array`` symbols.
+Using these, both a constructor to run at startup as well as an ELF note are
+created.
+
+The ELF note is the entrypoint for externally retrieving xrefs from a binary
+without having to run it. It can be found by walking through the ELF data
+structures even if the binary has been fully stripped of debug and section
+information. SystemTap's SDT probes & LTTng's trace points work in the same
+way (though they emit 1 note for each probe, while xrefs only emit one note
+in total which refers to the array.) Using xrefs does not impact SystemTap
+or LTTng, the notes have identifiers they can be distinguished by.
+
+The ELF structure of a linked binary (library or executable) will look like
+this::
+
+ $ readelf --wide -l -n lib/.libs/libfrr.so
+
+ Elf file type is DYN (Shared object file)
+ Entry point 0x67d21
+ There are 12 program headers, starting at offset 64
+
+ Program Headers:
+ Type Offset VirtAddr PhysAddr FileSiz MemSiz Flg Align
+ PHDR 0x000040 0x0000000000000040 0x0000000000000040 0x0002a0 0x0002a0 R 0x8
+ INTERP 0x125560 0x0000000000125560 0x0000000000125560 0x00001c 0x00001c R 0x10
+ [Requesting program interpreter: /lib64/ld-linux-x86-64.so.2]
+ LOAD 0x000000 0x0000000000000000 0x0000000000000000 0x02aff0 0x02aff0 R 0x1000
+ LOAD 0x02b000 0x000000000002b000 0x000000000002b000 0x0b2889 0x0b2889 R E 0x1000
+ LOAD 0x0de000 0x00000000000de000 0x00000000000de000 0x070048 0x070048 R 0x1000
+ LOAD 0x14e428 0x000000000014f428 0x000000000014f428 0x00fb70 0x01a2b8 RW 0x1000
+ DYNAMIC 0x157a40 0x0000000000158a40 0x0000000000158a40 0x000270 0x000270 RW 0x8
+ NOTE 0x0002e0 0x00000000000002e0 0x00000000000002e0 0x00004c 0x00004c R 0x4
+ TLS 0x14e428 0x000000000014f428 0x000000000014f428 0x000000 0x000008 R 0x8
+ GNU_EH_FRAME 0x12557c 0x000000000012557c 0x000000000012557c 0x00819c 0x00819c R 0x4
+ GNU_STACK 0x000000 0x0000000000000000 0x0000000000000000 0x000000 0x000000 RW 0x10
+ GNU_RELRO 0x14e428 0x000000000014f428 0x000000000014f428 0x009bd8 0x009bd8 R 0x1
+
+ (...)
+
+ Displaying notes found in: .note.gnu.build-id
+ Owner Data size Description
+ GNU 0x00000014 NT_GNU_BUILD_ID (unique build ID bitstring) Build ID: 6a1f66be38b523095ebd6ec13cc15820cede903d
+
+ Displaying notes found in: .note.FRR
+ Owner Data size Description
+ FRRouting 0x00000010 Unknown note type: (0x46455258) description data: 6c eb 15 00 00 00 00 00 74 ec 15 00 00 00 00 00
+
+Where 0x15eb6c…0x15ec74 are the offsets (relative to the note itself) where
+the xref array is in the file. Also note the owner is clearly marked as
+"FRRouting" and the type is "XREF" in hex.
+
+For SystemTap's use of ELF notes, refer to
+https://libstapsdt.readthedocs.io/en/latest/how-it-works/internals.html as an
+entry point.
+
+.. note::
+
+ Due to GCC bug 41091, the "xref_array" section is not correctly generated
+ for C++ code when compiled by GCC. A workaround is present for runtime
+ functionality, but to extract the xrefs from a C++ source file, it needs
+ to be built with clang (or a future fixed version of GCC) instead.
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index e609761e1c..cb97ee22df 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -435,10 +435,14 @@ Require policy on EBGP
.. clicmd:: [no] bgp ebgp-requires-policy
This command requires incoming and outgoing filters to be applied
- for eBGP sessions. Without the incoming filter, no routes will be
- accepted. Without the outgoing filter, no routes will be announced.
+ for eBGP sessions as part of RFC-8212 compliance. Without the incoming
+ filter, no routes will be accepted. Without the outgoing filter, no
+ routes will be announced.
- This is enabled by default.
+ This is enabled by default for the traditional configuration and
+ turned off by default for datacenter configuration.
+
+ When you enable/disable this option you MUST clear the session.
When the incoming or outgoing filter is missing you will see
"(Policy)" sign under ``show bgp summary``:
@@ -457,6 +461,22 @@ Require policy on EBGP
192.168.0.2 4 65002 8 10 0 0 0 00:03:09 5 (Policy)
fe80:1::2222 4 65002 9 11 0 0 0 00:03:09 (Policy) (Policy)
+ Additionally a `show bgp neighbor` command would indicate in the `For address family:`
+ block that:
+
+ .. code-block:: frr
+
+ exit1# show bgp neighbor
+ ...
+ For address family: IPv4 Unicast
+ Update group 1, subgroup 1
+ Packet Queue length 0
+ Inbound soft reconfiguration allowed
+ Community attribute sent to this neighbor(all)
+ Inbound updates discarded due to missing policy
+ Outbound updates discarded due to missing policy
+ 0 accepted prefixes
+
Reject routes with AS_SET or AS_CONFED_SET types
------------------------------------------------
@@ -1468,6 +1488,9 @@ Configuring Peers
directly connected and this knob is not enabled, the session will not
establish.
+ If the peer's IP address is not in the RIB and is reachable via the
+ default route, then you have to enable ``ip nht resolve-via-default``.
+
.. index:: neighbor PEER description ...
.. clicmd:: [no] neighbor PEER description ...
@@ -1967,9 +1990,9 @@ is 4 octet long. The following format is used to define the community value.
``0xFFFF029A`` ``65535:666``. :rfc:`7999` documents sending prefixes to
EBGP peers and upstream for the purpose of blackholing traffic.
Prefixes tagged with the this community should normally not be
- re-advertised from neighbors of the originating network. It is
- recommended upon receiving prefixes tagged with this community to
- add ``NO_EXPORT`` and ``NO_ADVERTISE``.
+ re-advertised from neighbors of the originating network. Upon receiving
+ ``BLACKHOLE`` community from a BGP speaker, ``NO_ADVERTISE`` community
+ is added automatically.
``no-export``
``no-export`` represents well-known communities value ``NO_EXPORT``
@@ -2740,11 +2763,11 @@ Ethernet Segments
An Ethernet Segment can be configured by specifying a system-MAC and a
local discriminatior against the bond interface on the PE (via zebra) -
-.. index:: evpn mh es-id [(1-16777215)$es_lid]
-.. clicmd:: [no] evpn mh es-id [(1-16777215)$es_lid]
+.. index:: evpn mh es-id (1-16777215)
+.. clicmd:: [no] evpn mh es-id (1-16777215)
-.. index:: evpn mh es-sys-mac [X:X:X:X:X:X$mac]
-.. clicmd:: [no$no] evpn mh es-sys-mac [X:X:X:X:X:X$mac]
+.. index:: evpn mh es-sys-mac X:X:X:X:X:X
+.. clicmd:: [no] evpn mh es-sys-mac X:X:X:X:X:X
The sys-mac and local discriminator are used for generating a 10-byte,
Type-3 Ethernet Segment ID.
@@ -2767,8 +2790,8 @@ forward BUM traffic received via the overlay network. This implementation
uses a preference based DF election specified by draft-ietf-bess-evpn-pref-df.
The DF preference is configurable per-ES (via zebra) -
-.. index:: evpn mh es-df-pref [(1-16777215)$df_pref]
-.. clicmd:: [no] evpn mh es-df-pref [(1-16777215)$df_pref]
+.. index:: evpn mh es-df-pref (1-16777215)
+.. clicmd:: [no] evpn mh es-df-pref (1-16777215)
BUM traffic is rxed via the overlay by all PEs attached to a server but
only the DF can forward the de-capsulated traffic to the access port. To
@@ -2778,6 +2801,20 @@ the traffic.
Similarly traffic received from ES peers via the overlay cannot be forwarded
to the server. This is split-horizon-filtering with local bias.
+Knobs for interop
+"""""""""""""""""
+Some vendors do not send EAD-per-EVI routes. To interop with them we
+need to relax the dependency on EAD-per-EVI routes and activate a remote
+ES-PE based on just the EAD-per-ES route.
+
+Note that by default we advertise and expect EAD-per-EVI routes.
+
+.. index:: disable-ead-evi-rx
+.. clicmd:: [no] disable-ead-evi-rx
+
+.. index:: disable-ead-evi-tx
+.. clicmd:: [no] disable-ead-evi-tx
+
Fast failover
"""""""""""""
As the primary purpose of EVPN-MH is redundancy keeping the failover efficient
@@ -2791,14 +2828,14 @@ been introduced for the express purpose of efficient ES failovers.
on via the following BGP config -
.. index:: use-es-l3nhg
-.. clicmd:: [no$no] use-es-l3nhg
+.. clicmd:: [no] use-es-l3nhg
- Local ES (MAC/Neigh) failover via ES-redirect.
On dataplanes that do not have support for ES-redirect the feature can be
turned off via the following zebra config -
.. index:: evpn mh redirect-off
-.. clicmd:: [no$no] evpn mh redirect-off
+.. clicmd:: [no] evpn mh redirect-off
Uplink/Core tracking
""""""""""""""""""""
@@ -2819,11 +2856,11 @@ the ES peer (PE2) goes down PE1 continues to advertise hosts learnt from PE2
for a holdtime during which it attempts to establish local reachability of
the host. This holdtime is configurable via the following zebra commands -
-.. index:: evpn mh neigh-holdtime (0-86400)$duration
-.. clicmd:: [no$no] evpn mh neigh-holdtime (0-86400)$duration
+.. index:: evpn mh neigh-holdtime (0-86400)
+.. clicmd:: [no] evpn mh neigh-holdtime (0-86400)
-.. index:: evpn mh mac-holdtime (0-86400)$duration
-.. clicmd:: [no$no] evpn mh mac-holdtime (0-86400)$duration
+.. index:: evpn mh mac-holdtime (0-86400)
+.. clicmd:: [no] evpn mh mac-holdtime (0-86400)
Startup delay
"""""""""""""
@@ -2832,8 +2869,8 @@ and EVPN network to converge before enabling the ESs. For this duration the
ES bonds are held protodown. The startup delay is configurable via the
following zebra command -
-.. index:: evpn mh startup-delay(0-3600)$duration
-.. clicmd:: [no] evpn mh startup-delay(0-3600)$duration
+.. index:: evpn mh startup-delay (0-3600)
+.. clicmd:: [no] evpn mh startup-delay (0-3600)
+Support with VRF network namespace backend
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -3295,8 +3332,8 @@ Some other commands provide additional options for filtering the output.
This command displays BGP routes using AS path regular expression
(:ref:`bgp-regular-expressions`).
-.. index:: show [ip] bgp [all] summary [json]
-.. clicmd:: show [ip] bgp [all] summary [json]
+.. index:: show [ip] bgp [all] summary [wide] [json]
+.. clicmd:: show [ip] bgp [all] summary [wide] [json]
Show a bgp peer summary for the specified address family.
@@ -3305,6 +3342,25 @@ and should no longer be used. In order to reach the other BGP routing tables
other than the IPv6 routing table given by :clicmd:`show bgp`, the new command
structure is extended with :clicmd:`show bgp [afi] [safi]`.
+``wide`` option gives more output like ``LocalAS`` and extended ``Desc`` to
+64 characters.
+
+ .. code-block:: frr
+
+ exit1# show ip bgp summary wide
+
+ IPv4 Unicast Summary:
+ BGP router identifier 192.168.100.1, local AS number 65534 vrf-id 0
+ BGP table version 3
+ RIB entries 5, using 920 bytes of memory
+ Peers 1, using 27 KiB of memory
+
+ Neighbor V AS LocalAS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt Desc
+ 192.168.0.2 4 65030 123 15 22 0 0 0 00:07:00 0 1 us-east1-rs1.frrouting.org
+
+ Total number of neighbors 1
+ exit1#
+
.. index:: show bgp [afi] [safi] [all] [wide|json]
.. clicmd:: show bgp [afi] [safi] [all] [wide|json]
@@ -3437,7 +3493,7 @@ attribute.
If ``json`` option is specified, output is displayed in JSON format.
-.. index:: show bgp labelpool <chunks|inuse|ledger|requests|summary> [json]
+.. index:: show bgp labelpool <chunks|inuse|ledger|requests|summary> [json]
.. clicmd:: show bgp labelpool <chunks|inuse|ledger|requests|summary> [json]
These commands display information about the BGP labelpool used for
diff --git a/doc/user/fabricd.rst b/doc/user/fabricd.rst
index a74d3e098b..17a51ccb3c 100644
--- a/doc/user/fabricd.rst
+++ b/doc/user/fabricd.rst
@@ -57,6 +57,19 @@ in the configuration:
Configure the authentication password for a domain, as clear text or md5 one.
+.. index:: attached-bit [receive ignore | send]
+.. clicmd:: attached-bit [receive ignore | send]
+
+.. index:: attached-bit
+.. clicmd:: no attached-bit
+
+ Set attached bit for inter-area traffic:
+
+ - receive
+ If LSP received with attached bit set, create default route to neighbor
+ - send
+ If L1|L2 router, set attached bit in LSP sent to L1 router
+
.. index:: log-adjacency-changes
.. clicmd:: log-adjacency-changes
@@ -64,7 +77,7 @@ in the configuration:
.. clicmd:: no log-adjacency-changes
Log changes in adjacency state.
-
+
.. index:: set-overload-bit
.. clicmd:: set-overload-bit
diff --git a/doc/user/index.rst b/doc/user/index.rst
index 993acf3b4c..7b9464668b 100644
--- a/doc/user/index.rst
+++ b/doc/user/index.rst
@@ -29,6 +29,7 @@ Basics
ipv6
kernel
snmp
+ scripting
.. modules
#########
diff --git a/doc/user/installation.rst b/doc/user/installation.rst
index 382d71b71f..a13e6ce43b 100644
--- a/doc/user/installation.rst
+++ b/doc/user/installation.rst
@@ -362,6 +362,10 @@ options from the list below.
Set hardcoded rpaths in the executable [default=yes].
+.. option:: --enable-scripting
+
+ Enable Lua scripting [default=no].
+
You may specify any combination of the above options to the configure
script. By default, the executables are placed in :file:`/usr/local/sbin`
and the configuration files in :file:`/usr/local/etc`. The :file:`/usr/local/`
@@ -382,6 +386,10 @@ options to the configuration script.
Configure zebra to use `dir` for local state files, such as pid files and
unix sockets.
+.. option:: --with-scriptdir <dir>
+
+ Look for Lua scripts in ``dir`` [``prefix``/etc/frr/scripts].
+
.. option:: --with-yangmodelsdir <dir>
Look for YANG modules in `dir` [`prefix`/share/yang]. Note that the FRR
diff --git a/doc/user/isisd.rst b/doc/user/isisd.rst
index 7e198564b5..352701728d 100644
--- a/doc/user/isisd.rst
+++ b/doc/user/isisd.rst
@@ -72,6 +72,19 @@ writing, *isisd* does not support multiple ISIS processes.
Configure the authentication password for an area, respectively a domain, as
clear text or md5 one.
+.. index:: attached-bit [receive ignore | send]
+.. clicmd:: attached-bit [receive ignore | send]
+
+.. index:: attached-bit
+.. clicmd:: no attached-bit
+
+ Set attached bit for inter-area traffic:
+
+ - receive
+ If LSP received with attached bit set, create default route to neighbor
+ - send
+ If L1|L2 router, set attached bit in LSP sent to L1 router
+
.. index:: log-adjacency-changes
.. clicmd:: log-adjacency-changes
diff --git a/doc/user/nhrpd.rst b/doc/user/nhrpd.rst
index 9caeb0eedb..65645c519d 100644
--- a/doc/user/nhrpd.rst
+++ b/doc/user/nhrpd.rst
@@ -180,14 +180,15 @@ Integration with IKE
nhrpd needs tight integration with IKE daemon for various reasons.
Currently only strongSwan is supported as IKE daemon.
-nhrpd connects to strongSwan using VICI protocol based on UNIX socket
-(hardcoded now as /var/run/charon.vici).
+nhrpd connects to strongSwan using VICI protocol based on UNIX socket which
+can be configured using the command below (default to /var/run/charon.vici).
strongSwan currently needs few patches applied. Please check out the
-https://git.alpinelinux.org/user/tteras/strongswan/log/?h=tteras-release
-and
-https://git.alpinelinux.org/user/tteras/strongswan/log/?h=tteras
-git repositories for the patches.
+original patches at:
+https://git-old.alpinelinux.org/user/tteras/strongswan/
+
+Actively maintained patches are also available at:
+https://gitlab.alpinelinux.org/alpine/aports/-/tree/master/main/strongswan
.. _nhrp-events:
diff --git a/doc/user/ospf6d.rst b/doc/user/ospf6d.rst
index 4f0ff90943..99119bb7e5 100644
--- a/doc/user/ospf6d.rst
+++ b/doc/user/ospf6d.rst
@@ -83,6 +83,12 @@ OSPF6 router
This configuration setting MUST be consistent across all routers
within the OSPF domain.
+.. index:: maximum-paths (1-64)
+.. clicmd:: [no] maximum-paths (1-64)
+
+ Use this command to control the maximum number of parallel routes that
+ OSPFv3 can support. The default is 64.
+
.. _ospf6-area:
OSPF6 area
@@ -170,10 +176,34 @@ Showing OSPF6 information
instance ID, simply type "show ipv6 ospf6 <cr>". JSON output can be
obtained by appending 'json' to the end of command.
-.. index:: show ipv6 ospf6 database
-.. clicmd:: show ipv6 ospf6 database
+.. index:: show ipv6 ospf6 database [<detail|dump|internal>] [json]
+.. clicmd:: show ipv6 ospf6 database [<detail|dump|internal>] [json]
+
+ This command shows LSAs present in the LSDB. There are three view options.
+ These options help in viewing all the parameters of the LSAs. JSON output
+ can be obtained by appending 'json' to the end of command. JSON option is
+ not applicable with 'dump' option.
+
+.. index:: show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> [json]
+.. clicmd:: show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> [json]
+
+ These options filter out the LSAs based on their type. The three view options
+ work here as well. JSON output can be obtained by appending 'json' to the
+ end of command.
+
+.. index:: show ipv6 ospf6 database adv-router A.B.C.D linkstate-id A.B.C.D [json]
+.. clicmd:: show ipv6 ospf6 database adv-router A.B.C.D linkstate-id A.B.C.D [json]
+
+ The LSAs additionally can also be filtered with the linkstate-id and
+ advertising-router fields. We can use the LSA type filter and views with
+ this command as well and vice-versa. JSON output can be obtained by
+ appending 'json' to the end of command.
- This command shows LSA database summary. You can specify the type of LSA.
+.. index:: show ipv6 ospf6 database self-originated [json]
+.. clicmd:: show ipv6 ospf6 database self-originated [json]
+
+ This command is used to filter the LSAs which are originated by the present
+ router. All the other filters are applicable here as well.
.. index:: show ipv6 ospf6 interface [json]
.. clicmd:: show ipv6 ospf6 interface [json]
@@ -216,6 +246,28 @@ Showing OSPF6 information
Shows the routes which are redistributed by the router. JSON output can
be obtained by appending 'json' at the end.
+.. index:: show ipv6 ospf6 route [<intra-area|inter-area|external-1|external-2|X:X::X:X|X:X::X:X/M|detail|summary>] [json]
+.. clicmd:: show ipv6 ospf6 route [<intra-area|inter-area|external-1|external-2|X:X::X:X|X:X::X:X/M|detail|summary>] [json]
+
+ This command displays the ospfv3 routing table as determined by the most
+ recent SPF calculations. Options are provided to view the different types
+ of routes. Other than the standard view there are two other options, detail
+ and summary. JSON output can be obtained by appending 'json' to the end of
+ command.
+
+.. index:: show ipv6 ospf6 route X:X::X:X/M match [detail] [json]
+.. clicmd:: show ipv6 ospf6 route X:X::X:X/M match [detail] [json]
+
+ The additional match option will match the given address to the destination
+ of the routes, and return the result accordingly.
+
+.. index:: show ipv6 ospf6 interface [IFNAME] prefix [detail|<X:X::X:X|X:X::X:X/M> [<match|detail>]] [json]
+.. clicmd:: show ipv6 ospf6 interface [IFNAME] prefix [detail|<X:X::X:X|X:X::X:X/M> [<match|detail>]] [json]
+
+ This command shows the prefixes present in the interface routing table.
+ Interface name can also be given. JSON output can be obtained by appending
+ 'json' to the end of command.
+
OSPF6 Configuration Examples
============================
diff --git a/doc/user/ospfd.rst b/doc/user/ospfd.rst
index 7184a0e197..ee02a9dae5 100644
--- a/doc/user/ospfd.rst
+++ b/doc/user/ospfd.rst
@@ -1233,6 +1233,20 @@ Summary Route will be originated on-behalf of all matched external LSAs.
Show configuration for display all configured summary routes with
matching external LSA information.
+TI-LFA
+======
+
+Experimental support for Topology Independent LFA (Loop-Free Alternate), see
+for example 'draft-bashandy-rtgwg-segment-routing-ti-lfa-05'. Note that
+TI-LFA requires a proper Segment Routing configuration.
+
+.. index:: fast-reroute ti-lfa [node-protection]
+.. clicmd:: fast-reroute ti-lfa [node-protection]
+
+ Configured on the router level. Activates TI-LFA for all interfaces.
+
+ Note that so far only P2P interfaces are supported.
+
Debugging OSPF
==============
diff --git a/doc/user/pbr.rst b/doc/user/pbr.rst
index c869c6bc45..5cec7cbe62 100644
--- a/doc/user/pbr.rst
+++ b/doc/user/pbr.rst
@@ -258,6 +258,21 @@ causes the policy to be installed into the kernel.
| valid | Is the map well-formed? | Boolean |
+--------+----------------------------+---------+
+.. _pbr-debugs:
+
+PBR Debugs
+===========
+
+.. index:: debug pbr
+.. clicmd:: debug pbr events|map|nht|zebra
+
+ Debug pbr in pbrd daemon. You specify what types of debugs to turn on.
+
+.. index:: debug zebra pbr
+.. clicmd:: debug zebra pbr
+
+ Debug pbr in zebra daemon.
+
.. _pbr-details:
PBR Details
diff --git a/doc/user/pim.rst b/doc/user/pim.rst
index 05297a0609..201fe2f9ed 100644
--- a/doc/user/pim.rst
+++ b/doc/user/pim.rst
@@ -389,10 +389,11 @@ cause great confusion.
Display IGMP interface information.
-.. index:: show ip igmp join
-.. clicmd:: show ip igmp join
+.. index:: show ip igmp [vrf NAME] join [json]
+.. clicmd:: show ip igmp [vrf NAME] join [json]
- Display IGMP static join information.
+ Display IGMP static join information for a specific vrf.
+ If "vrf all" is provided, it displays information for all the vrfs present.
.. index:: show ip igmp groups
.. clicmd:: show ip igmp groups
diff --git a/doc/user/scripting.rst b/doc/user/scripting.rst
new file mode 100644
index 0000000000..b0295e5706
--- /dev/null
+++ b/doc/user/scripting.rst
@@ -0,0 +1,28 @@
+.. _scripting:
+
+*********
+Scripting
+*********
+
+The behavior of FRR may be extended or customized using its built-in scripting
+capabilities.
+
+Some configuration commands accept the name of a Lua script to call to perform
+some task or make some decision. These scripts have their environments
+populated with some set of inputs, and are expected to populate some set of
+output variables, which are read by FRR after the script completes. The names
+and expected contents of these scripts are documented alongside the commands
+that support them.
+
+These scripts live in :file:`/etc/frr/scripts/` by default. This is
+configurable at compile time via ``--with-scriptdir``. It may be
+overridden at runtime with the ``--scriptdir`` daemon option.
+
+In order to use scripting, FRR must be built with ``--enable-scripting``.
+
+.. note::
+
+ Scripts are typically loaded just-in-time. This means you can change the
+ contents of a script that is in use without restarting FRR. Not all
+ scripting locations may behave this way; refer to the documentation for the
+ particular location.
diff --git a/doc/user/setup.rst b/doc/user/setup.rst
index b2b71cf012..64a33765c2 100644
--- a/doc/user/setup.rst
+++ b/doc/user/setup.rst
@@ -240,3 +240,53 @@ because FRR's monitoring program cannot currently distinguish between a crashed
The closest that can be achieved is to remove all configuration for the daemon,
and set its line in ``/etc/frr/daemons`` to ``=no``. Once this is done, the
daemon will be stopped the next time FRR is restarted.
+
+
+Network Namespaces
+^^^^^^^^^^^^^^^^^^
+
+It is possible to run FRR in different network namespaces so it can be
+further compartmentalized (e.g. confining to a smaller subset network).
+The network namespace configuration can be used in the default FRR
+configuration pathspace or it can be used in a different pathspace
+(`-N/--pathspace`).
+
+To use FRR network namespace in the default pathspace you should add
+or uncomment the ``watchfrr_options`` line in ``/etc/frr/daemons``:
+
+.. code-block:: diff
+
+ - #watchfrr_options="--netns"
+ + watchfrr_options="--netns=<network-namespace-name>"
+
+If you want to use a different pathspace with the network namespace
+(the recommended way) you should add/uncomment the ``watchfrr_options``
+line in ``/etc/frr/<namespace>/daemons``:
+
+.. code-block:: diff
+
+ - #watchfrr_options="--netns"
+ + #watchfrr_options="--netns=<network-namespace-name>"
+ +
+ + # `--netns` argument is optional and if not provided it will
+ + # default to the pathspace name.
+ + watchfrr_options="--netns"
+
+To start FRR in the new pathspace+network namespace the initialization script
+should be called with an extra parameter:
+
+
+.. code::
+
+ /etc/init.d/frr start <pathspace-name>
+
+
+.. note::
+
+ Some Linux distributions might not use the default init script
+ shipped with FRR, in that case you might want to try running the
+ bundled script in ``/usr/lib/frr/frrinit.sh``.
+
+ On systemd you might create different units or parameterize the
+ existing one. See the man page:
+ https://www.freedesktop.org/software/systemd/man/systemd.unit.html
diff --git a/doc/user/subdir.am b/doc/user/subdir.am
index a78d261863..3585245e85 100644
--- a/doc/user/subdir.am
+++ b/doc/user/subdir.am
@@ -35,6 +35,7 @@ user_RSTFILES = \
doc/user/routemap.rst \
doc/user/routeserver.rst \
doc/user/rpki.rst \
+ doc/user/scripting.rst \
doc/user/setup.rst \
doc/user/sharp.rst \
doc/user/snmp.rst \
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index 91cd205bed..a9979558c3 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -407,6 +407,14 @@ If no option is chosen, then the *Linux VRF* implementation as references in
https://www.kernel.org/doc/Documentation/networking/vrf.txt will be mapped over
the *Zebra* VRF. The routing table associated to that VRF is a Linux table
identifier located in the same *Linux network namespace* where *Zebra* started.
+Please note when using the *Linux VRF* routing table it is expected that a
+default Kernel route will be installed that has a metric as outlined in the
+www.kernel.org doc above. The Linux Kernel does table lookup via a combination
+of rule application of the rule table and then route lookup of the specified
+table. If no route match is found then the next applicable rule is applied
+to find the next route table to use to look for a route match. As such if
+your VRF table does not have a default blackhole route with a high metric
+VRF route lookup will leave the table specified by the VRF, which is undesirable.
If the :option:`-n` option is chosen, then the *Linux network namespace* will
be mapped over the *Zebra* VRF. That implies that *Zebra* is able to configure
@@ -759,6 +767,12 @@ IPv6 example for OSPFv3.
not created at startup. On Debian, FRR might start before ifupdown
completes. Consider a reboot test.
+.. index:: zebra route-map delay-timer (0-600)
+.. clicmd:: [no] zebra route-map delay-timer (0-600)
+
+ Set the delay before any route-maps are processed in zebra. The
+ default time for this is 5 seconds.
+
.. _zebra-fib-push-interface:
zebra FIB push interface
diff --git a/docker/alpine/Dockerfile b/docker/alpine/Dockerfile
index ed6453e2b1..126710f8c2 100644
--- a/docker/alpine/Dockerfile
+++ b/docker/alpine/Dockerfile
@@ -1,10 +1,9 @@
# This stage builds a dist tarball from the source
-FROM alpine:edge as source-builder
+FROM alpine:latest as source-builder
RUN mkdir -p /src/alpine
COPY alpine/APKBUILD.in /src/alpine
RUN source /src/alpine/APKBUILD.in \
- && echo 'http://dl-cdn.alpinelinux.org/alpine/edge/testing' >> /etc/apk/repositories \
&& apk add \
--no-cache \
--update-cache \
@@ -22,10 +21,9 @@ RUN cd /src \
&& make dist
# This stage builds an apk from the dist tarball
-FROM alpine:edge as alpine-builder
+FROM alpine:latest as alpine-builder
# Don't use nocache here so that abuild can use the cache
-RUN echo 'http://dl-cdn.alpinelinux.org/alpine/edge/testing' >> /etc/apk/repositories \
- && apk add \
+RUN apk add \
--update-cache \
abuild \
alpine-conf \
@@ -46,11 +44,10 @@ RUN cd /dist \
&& abuild -r -P /pkgs/apk
# This stage installs frr from the apk
-FROM alpine:edge
+FROM alpine:latest
RUN mkdir -p /pkgs/apk
COPY --from=alpine-builder /pkgs/apk/ /pkgs/apk/
-RUN echo 'http://dl-cdn.alpinelinux.org/alpine/edge/testing' >> /etc/apk/repositories \
- && apk add \
+RUN apk add \
--no-cache \
--update-cache \
tini \
diff --git a/docker/centos-7/Dockerfile b/docker/centos-7/Dockerfile
index cca8baa147..a92326fcf3 100644
--- a/docker/centos-7/Dockerfile
+++ b/docker/centos-7/Dockerfile
@@ -5,8 +5,8 @@ RUN yum install -y rpm-build autoconf automake libtool make \
readline-devel texinfo net-snmp-devel groff pkgconfig \
json-c-devel pam-devel bison flex pytest c-ares-devel \
python3-devel python3-sphinx systemd-devel libcap-devel \
- https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/CentOS-7-x86_64-Packages/libyang-0.16.111-0.x86_64.rpm \
- https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/CentOS-7-x86_64-Packages/libyang-devel-0.16.111-0.x86_64.rpm \
+ https://ci1.netdef.org/artifact/LIBYANG-LY1REL/shared/build-4/CentOS-7-x86_64-Packages/libyang1-1.0.184-0.x86_64.rpm \
+ https://ci1.netdef.org/artifact/LIBYANG-LY1REL/shared/build-4/CentOS-7-x86_64-Packages/libyang-devel-1.0.184-0.x86_64.rpm \
https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-0.7.0-1.el7.centos.x86_64.rpm \
https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-devel-0.7.0-1.el7.centos.x86_64.rpm
@@ -32,7 +32,7 @@ RUN echo '%_smp_mflags %( echo "-j$(/usr/bin/getconf _NPROCESSORS_ONLN)"; )' >>
# This stage installs frr from the rpm
FROM centos:centos7
RUN mkdir -p /pkgs/rpm \
- && yum install -y https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/CentOS-7-x86_64-Packages/libyang-0.16.111-0.x86_64.rpm \
+ && yum install -y https://ci1.netdef.org/artifact/LIBYANG-LY1REL/shared/build-4/CentOS-7-x86_64-Packages/libyang1-1.0.184-0.x86_64.rpm \
https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-0.7.0-1.el7.centos.x86_64.rpm
COPY --from=centos-7-builder /rpmbuild/RPMS/ /pkgs/rpm/
diff --git a/docker/centos-8/Dockerfile b/docker/centos-8/Dockerfile
index 6c1f873589..7ed7948927 100644
--- a/docker/centos-8/Dockerfile
+++ b/docker/centos-8/Dockerfile
@@ -1,17 +1,15 @@
# This stage builds an rpm from the source
FROM centos:centos8 as centos-8-builder
-RUN dnf install --enablerepo=PowerTools -y rpm-build git autoconf pcre-devel \
+RUN dnf install --enablerepo=powertools -y rpm-build git autoconf pcre-devel \
automake libtool make readline-devel texinfo net-snmp-devel pkgconfig \
- groff pkgconfig json-c-devel pam-devel bison flex python2-pytest \
- c-ares-devel python2-devel systemd-devel libcap-devel platform-python-devel \
- https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/CentOS-7-x86_64-Packages/libyang-0.16.111-0.x86_64.rpm \
- https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/CentOS-7-x86_64-Packages/libyang-devel-0.16.111-0.x86_64.rpm \
+ groff pkgconfig json-c-devel pam-devel bison flex python3-pytest \
+ c-ares-devel python3-devel python3-sphinx systemd-devel libcap-devel platform-python-devel \
+ https://ci1.netdef.org/artifact/LIBYANG-LY1REL/shared/build-4/CentOS-8-x86_64-Packages/libyang1-1.0.184-0.x86_64.rpm \
+ https://ci1.netdef.org/artifact/LIBYANG-LY1REL/shared/build-4/CentOS-8-x86_64-Packages/libyang-devel-1.0.184-0.x86_64.rpm \
https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-0.7.0-1.el7.centos.x86_64.rpm \
https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-devel-0.7.0-1.el7.centos.x86_64.rpm
-RUN pip2 install sphinx
-
COPY . /src
ARG PKGVER
@@ -35,7 +33,7 @@ RUN echo '%_smp_mflags %( echo "-j$(/usr/bin/getconf _NPROCESSORS_ONLN)"; )' >>
# This stage installs frr from the rpm
FROM centos:centos8
RUN mkdir -p /pkgs/rpm \
- && yum install -y https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/CentOS-7-x86_64-Packages/libyang-0.16.111-0.x86_64.rpm \
+ && yum install -y https://ci1.netdef.org/artifact/LIBYANG-LY1REL/shared/build-4/CentOS-8-x86_64-Packages/libyang1-1.0.184-0.x86_64.rpm \
https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-0.7.0-1.el7.centos.x86_64.rpm
COPY --from=centos-8-builder /rpmbuild/RPMS/ /pkgs/rpm/
diff --git a/eigrpd/eigrp_filter.c b/eigrpd/eigrp_filter.c
index 009b57e05f..c77a6fc1b1 100644
--- a/eigrpd/eigrp_filter.c
+++ b/eigrpd/eigrp_filter.c
@@ -124,39 +124,6 @@ void eigrp_distribute_update(struct distribute_ctx *ctx,
} else
e->prefix[EIGRP_FILTER_OUT] = NULL;
-// This is commented out, because the distribute.[ch] code
-// changes looked poorly written from first glance
-// commit was 133bdf2d
-// TODO: DBS
-#if 0
- /* route-map IN for whole process */
- if (dist->route[DISTRIBUTE_V4_IN])
- {
- routemap = route_map_lookup_by_name (dist->route[DISTRIBUTE_V4_IN]);
- if (routemap)
- e->routemap[EIGRP_FILTER_IN] = routemap;
- else
- e->routemap[EIGRP_FILTER_IN] = NULL;
- }
- else
- {
- e->routemap[EIGRP_FILTER_IN] = NULL;
- }
-
- /* route-map OUT for whole process */
- if (dist->route[DISTRIBUTE_V4_OUT])
- {
- routemap = route_map_lookup_by_name (dist->route[DISTRIBUTE_V4_OUT]);
- if (routemap)
- e->routemap[EIGRP_FILTER_OUT] = routemap;
- else
- e->routemap[EIGRP_FILTER_OUT] = NULL;
- }
- else
- {
- e->routemap[EIGRP_FILTER_OUT] = NULL;
- }
-#endif
// TODO: check Graceful restart after 10sec
/* cancel GR scheduled */
@@ -232,36 +199,6 @@ void eigrp_distribute_update(struct distribute_ctx *ctx,
} else
ei->prefix[EIGRP_FILTER_OUT] = NULL;
-#if 0
- /* route-map IN for whole process */
- if (dist->route[DISTRIBUTE_V4_IN])
- {
- zlog_info("<DEBUG ACL ALL in");
- routemap = route_map_lookup_by_name (dist->route[DISTRIBUTE_V4_IN]);
- if (routemap)
- ei->routemap[EIGRP_FILTER_IN] = routemap;
- else
- ei->routemap[EIGRP_FILTER_IN] = NULL;
- }
- else
- {
- ei->routemap[EIGRP_FILTER_IN] = NULL;
- }
-
- /* route-map OUT for whole process */
- if (dist->route[DISTRIBUTE_V4_OUT])
- {
- routemap = route_map_lookup_by_name (dist->route[DISTRIBUTE_V4_OUT]);
- if (routemap)
- ei->routemap[EIGRP_FILTER_OUT] = routemap;
- else
- ei->routemap[EIGRP_FILTER_OUT] = NULL;
- }
- else
- {
- ei->routemap[EIGRP_FILTER_OUT] = NULL;
- }
-#endif
// TODO: check Graceful restart after 10sec
/* Cancel GR scheduled */
diff --git a/eigrpd/eigrp_interface.c b/eigrpd/eigrp_interface.c
index 74eff958da..bb7a930e6d 100644
--- a/eigrpd/eigrp_interface.c
+++ b/eigrpd/eigrp_interface.c
@@ -229,6 +229,20 @@ void eigrp_del_if_params(struct eigrp_if_params *eip)
free(eip->auth_keychain);
}
+/*
+ * Set the network byte order of the 3 bytes we send
+ * of the mtu of the link.
+ */
+static void eigrp_mtu_convert(struct eigrp_metrics *metric, uint32_t host_mtu)
+{
+ uint32_t network_mtu = htonl(host_mtu);
+ uint8_t *nm = (uint8_t *)&network_mtu;
+
+ metric->mtu[0] = nm[1];
+ metric->mtu[1] = nm[2];
+ metric->mtu[2] = nm[3];
+}
+
int eigrp_if_up(struct eigrp_interface *ei)
{
struct eigrp_prefix_descriptor *pe;
@@ -256,9 +270,7 @@ int eigrp_if_up(struct eigrp_interface *ei)
metric.delay = eigrp_delay_to_scaled(ei->params.delay);
metric.load = ei->params.load;
metric.reliability = ei->params.reliability;
- metric.mtu[0] = 0xDC;
- metric.mtu[1] = 0x05;
- metric.mtu[2] = 0x00;
+ eigrp_mtu_convert(&metric, ei->ifp->mtu);
metric.hop_count = 0;
metric.flags = 0;
metric.tag = 0;
diff --git a/eigrpd/eigrp_structs.h b/eigrpd/eigrp_structs.h
index cddab57dd5..0d8bb29964 100644
--- a/eigrpd/eigrp_structs.h
+++ b/eigrpd/eigrp_structs.h
@@ -40,7 +40,7 @@
struct eigrp_metrics {
uint32_t delay;
uint32_t bandwidth;
- unsigned char mtu[3];
+ uint8_t mtu[3];
uint8_t hop_count;
uint8_t reliability;
uint8_t load;
diff --git a/eigrpd/eigrp_topology.c b/eigrpd/eigrp_topology.c
index 1b7e9fc15b..6da7756f84 100644
--- a/eigrpd/eigrp_topology.c
+++ b/eigrpd/eigrp_topology.c
@@ -508,6 +508,7 @@ void eigrp_topology_neighbor_down(struct eigrp *eigrp,
if (entry->adv_router != nbr)
continue;
+ memset(&msg, 0, sizeof(msg));
msg.metrics.delay = EIGRP_MAX_METRIC;
msg.packet_type = EIGRP_OPC_UPDATE;
msg.eigrp = eigrp;
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 50011d55ec..fb79481cb2 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -293,15 +293,4 @@ struct br_mcast_stats {
__u64 mcast_bytes[BR_MCAST_DIR_SIZE];
__u64 mcast_packets[BR_MCAST_DIR_SIZE];
};
-
-/* FDB notification bits for NDA_NOTIFY:
- * - BR_FDB_NFY_STATIC - notify on activity/expire even for a static entry
- * - BR_FDB_NFY_INACTIVE - mark as inactive to avoid double notification,
- * used with BR_FDB_NFY_STATIC (kernel controlled)
- */
-enum {
- BR_FDB_NFY_STATIC,
- BR_FDB_NFY_INACTIVE,
- BR_FDB_NFY_MAX
-};
#endif /* _UAPI_LINUX_IF_BRIDGE_H */
diff --git a/include/linux/neighbour.h b/include/linux/neighbour.h
index c06fe708f7..581af73c7f 100644
--- a/include/linux/neighbour.h
+++ b/include/linux/neighbour.h
@@ -30,7 +30,7 @@ enum {
NDA_SRC_VNI,
NDA_PROTOCOL, /* Originator of entry */
NDA_NH_ID,
- NDA_NOTIFY,
+ NDA_FDB_EXT_ATTRS,
NDA_EXT_FLAGS,
__NDA_MAX
};
@@ -178,4 +178,27 @@ enum {
};
#define NDTA_MAX (__NDTA_MAX - 1)
+/* FDB activity notification bits used in NFEA_ACTIVITY_NOTIFY:
+ * - FDB_NOTIFY_BIT - notify on activity/expire for any entry
+ * - FDB_NOTIFY_INACTIVE_BIT - mark as inactive to avoid multiple notifications
+ */
+enum {
+ FDB_NOTIFY_BIT = (1 << 0),
+ FDB_NOTIFY_INACTIVE_BIT = (1 << 1)
+};
+
+/* embedded into NDA_FDB_EXT_ATTRS:
+ * [NDA_FDB_EXT_ATTRS] = {
+ * [NFEA_ACTIVITY_NOTIFY]
+ * ...
+ * }
+ */
+enum {
+ NFEA_UNSPEC,
+ NFEA_ACTIVITY_NOTIFY,
+ NFEA_DONT_REFRESH,
+ __NFEA_MAX
+};
+#define NFEA_MAX (__NFEA_MAX - 1)
+
#endif
diff --git a/isisd/isis_adjacency.h b/isisd/isis_adjacency.h
index 3c3a211a52..2780d826f5 100644
--- a/isisd/isis_adjacency.h
+++ b/isisd/isis_adjacency.h
@@ -139,5 +139,6 @@ void isis_adj_print_vty(struct isis_adjacency *adj, struct vty *vty,
void isis_adj_build_neigh_list(struct list *adjdb, struct list *list);
void isis_adj_build_up_list(struct list *adjdb, struct list *list);
int isis_adj_usage2levels(enum isis_adj_usage usage);
+int isis_bfd_startup_timer(struct thread *thread);
#endif /* ISIS_ADJACENCY_H */
diff --git a/isisd/isis_bfd.c b/isisd/isis_bfd.c
index f81dd6cf51..4fac73511b 100644
--- a/isisd/isis_bfd.c
+++ b/isisd/isis_bfd.c
@@ -146,11 +146,11 @@ static void bfd_adj_event(struct isis_adjacency *adj, struct prefix *dst,
static int isis_bfd_interface_dest_update(ZAPI_CALLBACK_ARGS)
{
struct interface *ifp;
- struct prefix dst_ip;
+ struct prefix dst_ip, src_ip;
int status;
- ifp = bfd_get_peer_info(zclient->ibuf, &dst_ip, NULL, &status,
- NULL, vrf_id);
+ ifp = bfd_get_peer_info(zclient->ibuf, &dst_ip, &src_ip, &status, NULL,
+ vrf_id);
if (!ifp || (dst_ip.family != AF_INET && dst_ip.family != AF_INET6))
return 0;
@@ -329,6 +329,13 @@ static void bfd_handle_adj_up(struct isis_adjacency *adj, int command)
if (!circuit->bfd_info)
goto out;
+ /* If IS-IS IPv6 is configured wait for IPv6 address to be programmed
+ * before starting up BFD
+ */
+ if ((circuit->ipv6_router && listcount(circuit->ipv6_link) == 0)
+ || adj->ipv6_address_count == 0)
+ return;
+
/*
* If IS-IS is enabled for both IPv4 and IPv6 on the circuit, prefer
* creating a BFD session over IPv6.
@@ -443,6 +450,44 @@ static int bfd_circuit_write_settings(struct isis_circuit *circuit,
}
#endif
+static int bfd_handle_adj_ip_enabled(struct isis_adjacency *adj, int family)
+{
+
+ if (family != AF_INET6)
+ return 0;
+
+ if (adj->bfd_session)
+ return 0;
+
+ if (adj->adj_state != ISIS_ADJ_UP)
+ return 0;
+
+ bfd_handle_adj_up(adj, ZEBRA_BFD_DEST_REGISTER);
+
+ return 0;
+}
+
+static int bfd_handle_circuit_add_addr(struct isis_circuit *circuit)
+{
+ struct isis_adjacency *adj;
+ struct listnode *node;
+
+ if (circuit->area == 0)
+ return 0;
+
+ for (ALL_LIST_ELEMENTS_RO(circuit->area->adjacency_list, node, adj)) {
+ if (adj->bfd_session)
+ continue;
+
+ if (adj->adj_state != ISIS_ADJ_UP)
+ continue;
+
+ bfd_handle_adj_up(adj, ZEBRA_BFD_DEST_REGISTER);
+ }
+
+ return 0;
+}
+
void isis_bfd_init(void)
{
bfd_gbl_init();
@@ -457,4 +502,6 @@ void isis_bfd_init(void)
hook_register(isis_circuit_config_write,
bfd_circuit_write_settings);
#endif
+ hook_register(isis_adj_ip_enabled_hook, bfd_handle_adj_ip_enabled);
+ hook_register(isis_circuit_add_addr_hook, bfd_handle_circuit_add_addr);
}
diff --git a/isisd/isis_bpf.c b/isisd/isis_bpf.c
index 454da99e09..88c3bfa63c 100644
--- a/isisd/isis_bpf.c
+++ b/isisd/isis_bpf.c
@@ -67,12 +67,6 @@ uint8_t *readbuff = NULL;
static const uint8_t ALL_L1_ISS[6] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x14};
static const uint8_t ALL_L2_ISS[6] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x15};
-#if 0
-/* missing support for P2P-over-LAN / ES-IS on BSD */
-static const uint8_t ALL_ISS[6] = {0x09, 0x00, 0x2B, 0x00, 0x00, 0x05};
-static const uint8_t ALL_ESS[6] = {0x09, 0x00, 0x2B, 0x00, 0x00, 0x04};
-#endif
-
static char sock_buff[16384];
static int open_bpf_dev(struct isis_circuit *circuit)
diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c
index 2580a7c43a..4aac3f8880 100644
--- a/isisd/isis_circuit.c
+++ b/isisd/isis_circuit.c
@@ -252,6 +252,9 @@ struct isis_circuit *circuit_scan_by_ifp(struct interface *ifp)
return circuit_lookup_by_ifp(ifp, isis->init_circ_list);
}
+DEFINE_HOOK(isis_circuit_add_addr_hook, (struct isis_circuit *circuit),
+ (circuit))
+
void isis_circuit_add_addr(struct isis_circuit *circuit,
struct connected *connected)
{
@@ -322,6 +325,9 @@ void isis_circuit_add_addr(struct isis_circuit *circuit,
connected->address, circuit->interface->name);
#endif /* EXTREME_DEBUG */
}
+
+ hook_call(isis_circuit_add_addr_hook, circuit);
+
return;
}
diff --git a/isisd/isis_circuit.h b/isisd/isis_circuit.h
index 9a8982dc06..3387232da2 100644
--- a/isisd/isis_circuit.h
+++ b/isisd/isis_circuit.h
@@ -225,4 +225,7 @@ DECLARE_HOOK(isis_circuit_config_write,
(circuit, vty))
#endif
+DECLARE_HOOK(isis_circuit_add_addr_hook, (struct isis_circuit *circuit),
+ (circuit))
+
#endif /* _ZEBRA_ISIS_CIRCUIT_H */
diff --git a/isisd/isis_cli.c b/isisd/isis_cli.c
index 5ca70eab0f..b48da9312f 100644
--- a/isisd/isis_cli.c
+++ b/isisd/isis_cli.c
@@ -593,6 +593,10 @@ void cli_show_isis_overload(struct vty *vty, struct lyd_node *dnode,
vty_out(vty, " set-overload-bit\n");
}
+#if CONFDATE > 20220119
+CPP_NOTICE(
+ "Use of `set-attached-bit` is deprecated please use attached-bit [send | receive]")
+#endif
/*
* XPath: /frr-isisd:isis/instance/attached
*/
@@ -600,18 +604,57 @@ DEFPY_YANG(set_attached_bit, set_attached_bit_cmd, "[no] set-attached-bit",
"Reset attached bit\n"
"Set attached bit to identify as L1/L2 router for inter-area traffic\n")
{
- nb_cli_enqueue_change(vty, "./attached", NB_OP_MODIFY,
+ vty_out(vty,
+ "set-attached-bit deprecated please use attached-bit [send | receive]\n");
+
+ return CMD_SUCCESS;
+}
+
+/*
+ * XPath: /frr-isisd:isis/instance/attach-send
+ */
+DEFPY_YANG(attached_bit_send, attached_bit_send_cmd, "[no] attached-bit send",
+ "Reset attached bit\n"
+ "Set attached bit for inter-area traffic\n"
+ "Set attached bit in LSP sent to L1 router\n")
+{
+ nb_cli_enqueue_change(vty, "./attach-send", NB_OP_MODIFY,
no ? "false" : "true");
return nb_cli_apply_changes(vty, NULL);
}
-void cli_show_isis_attached(struct vty *vty, struct lyd_node *dnode,
- bool show_defaults)
+void cli_show_isis_attached_send(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults)
+{
+ if (!yang_dnode_get_bool(dnode, NULL))
+ vty_out(vty, " no");
+ vty_out(vty, " attached-bit send\n");
+}
+
+/*
+ * XPath: /frr-isisd:isis/instance/attach-receive-ignore
+ */
+DEFPY_YANG(
+ attached_bit_receive_ignore, attached_bit_receive_ignore_cmd,
+ "[no] attached-bit receive ignore",
+ "Reset attached bit\n"
+ "Set attach bit for inter-area traffic\n"
+ "If LSP received with attached bit set, create default route to neighbor\n"
+ "Do not process attached bit\n")
+{
+ nb_cli_enqueue_change(vty, "./attach-receive-ignore", NB_OP_MODIFY,
+ no ? "false" : "true");
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+void cli_show_isis_attached_receive(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults)
{
if (!yang_dnode_get_bool(dnode, NULL))
vty_out(vty, " no");
- vty_out(vty, " set-attached-bit\n");
+ vty_out(vty, " attached-bit receive ignore\n");
}
/*
@@ -3206,6 +3249,8 @@ void isis_cli_init(void)
install_element(ISIS_NODE, &set_overload_bit_cmd);
install_element(ISIS_NODE, &set_attached_bit_cmd);
+ install_element(ISIS_NODE, &attached_bit_send_cmd);
+ install_element(ISIS_NODE, &attached_bit_receive_ignore_cmd);
install_element(ISIS_NODE, &metric_style_cmd);
install_element(ISIS_NODE, &no_metric_style_cmd);
diff --git a/isisd/isis_constants.h b/isisd/isis_constants.h
index 25eae06cb0..3d6a20ee66 100644
--- a/isisd/isis_constants.h
+++ b/isisd/isis_constants.h
@@ -140,7 +140,7 @@
* LSP bit masks
*/
#define LSPBIT_P 0x80
-#define LSPBIT_ATT 0x78
+#define LSPBIT_ATT 0x08 /* only use the Default ATT bit */
#define LSPBIT_OL 0x04
#define LSPBIT_IST 0x03
@@ -158,7 +158,6 @@
#define ISIS_MASK_LSP_ATT_ERROR_BIT(x) ((x)&0x40)
#define ISIS_MASK_LSP_ATT_EXPENSE_BIT(x) ((x)&0x20)
#define ISIS_MASK_LSP_ATT_DELAY_BIT(x) ((x)&0x10)
-#define ISIS_MASK_LSP_ATT_DEFAULT_BIT(x) ((x)&0x8)
#define LLC_LEN 3
diff --git a/isisd/isis_dlpi.c b/isisd/isis_dlpi.c
index 06fb41430c..bb8c542597 100644
--- a/isisd/isis_dlpi.c
+++ b/isisd/isis_dlpi.c
@@ -57,11 +57,6 @@ static t_uscalar_t dlpi_ctl[1024]; /* DLPI control messages */
static const uint8_t ALL_L1_ISS[6] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x14};
static const uint8_t ALL_L2_ISS[6] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x15};
static const uint8_t ALL_ISS[6] = {0x09, 0x00, 0x2B, 0x00, 0x00, 0x05};
-#if 0
-/* missing support for ES-IS on Solaris */
-static const uint8_t ALL_ESS[6] = {0x09, 0x00, 0x2B, 0x00, 0x00, 0x04};
-#endif
-
static uint8_t sock_buff[16384];
static unsigned short pf_filter[] = {
diff --git a/isisd/isis_dr.c b/isisd/isis_dr.c
index d03f857a0c..f6175fe9a4 100644
--- a/isisd/isis_dr.c
+++ b/isisd/isis_dr.c
@@ -74,7 +74,8 @@ int isis_run_dr(struct thread *thread)
if (circuit->circ_type != CIRCUIT_T_BROADCAST) {
zlog_warn("%s: scheduled for non broadcast circuit from %s:%d",
- __func__, thread->schedfrom, thread->schedfrom_line);
+ __func__, thread->xref->xref.file,
+ thread->xref->xref.line);
return ISIS_WARNING;
}
diff --git a/isisd/isis_ldp_sync.c b/isisd/isis_ldp_sync.c
index 00bef5c782..585f769806 100644
--- a/isisd/isis_ldp_sync.c
+++ b/isisd/isis_ldp_sync.c
@@ -145,6 +145,7 @@ void isis_ldp_sync_state_req_msg(struct isis_circuit *circuit)
ils_debug("ldp_sync: send state request to LDP for %s",
ifp->name);
+ memset(&request, 0, sizeof(request));
strlcpy(request.name, ifp->name, sizeof(ifp->name));
request.proto = LDP_IGP_SYNC_IF_STATE_REQUEST;
request.ifindex = ifp->ifindex;
diff --git a/isisd/isis_lsp.c b/isisd/isis_lsp.c
index 47225ea2c3..6d2303817b 100644
--- a/isisd/isis_lsp.c
+++ b/isisd/isis_lsp.c
@@ -399,7 +399,98 @@ static void lsp_seqno_update(struct isis_lsp *lsp0)
return;
}
-static uint8_t lsp_bits_generate(int level, int overload_bit, int attached_bit)
+static bool isis_level2_adj_up(struct isis_area *curr_area)
+{
+ struct listnode *node, *cnode;
+ struct isis_circuit *circuit;
+ struct list *adjdb;
+ struct isis_adjacency *adj;
+ struct isis *isis = curr_area->isis;
+ struct isis_area *area;
+
+ /* lookup for a Level2 adjacency up in another area */
+ for (ALL_LIST_ELEMENTS_RO(isis->area_list, node, area)) {
+ if (area->area_tag
+ && strcmp(area->area_tag, curr_area->area_tag) == 0)
+ continue;
+
+ for (ALL_LIST_ELEMENTS_RO(area->circuit_list, cnode, circuit)) {
+ if (circuit->circ_type == CIRCUIT_T_BROADCAST) {
+ adjdb = circuit->u.bc.adjdb[1];
+ if (!adjdb || !adjdb->count)
+ continue;
+
+ for (ALL_LIST_ELEMENTS_RO(adjdb, node, adj)) {
+ if (adj->level != ISIS_ADJ_LEVEL1
+ && adj->adj_state == ISIS_ADJ_UP)
+ return true;
+ }
+ } else if (circuit->circ_type == CIRCUIT_T_P2P
+ && circuit->u.p2p.neighbor) {
+ adj = circuit->u.p2p.neighbor;
+ if (adj->level != ISIS_ADJ_LEVEL1
+ && adj->adj_state == ISIS_ADJ_UP)
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+static void isis_reset_attach_bit(struct isis_adjacency *curr_adj)
+{
+ struct listnode *node;
+ struct isis_area *curr_area = curr_adj->circuit->area;
+ struct isis *isis = curr_area->isis;
+ struct isis_area *area;
+ struct lspdb_head *head;
+ struct isis_lsp *lsp;
+ uint8_t lspid[ISIS_SYS_ID_LEN + 2];
+
+ /* If new adjacency is up and area is level2 or level1and2 verify if
+ * we have LSPs in other areas that should now set the attach bit.
+ *
+ * If adjacency is down, verify if we no longer have another level2
+ * or level1and2 areas so that we should now remove the attach bit.
+ */
+ if (curr_area->is_type == IS_LEVEL_1)
+ return;
+
+ for (ALL_LIST_ELEMENTS_RO(isis->area_list, node, area)) {
+ if (area->area_tag
+ && strcmp(area->area_tag, curr_area->area_tag) == 0)
+ continue;
+
+ if (!area->attached_bit_send)
+ continue;
+
+ head = &area->lspdb[IS_LEVEL_1 - 1];
+ memset(lspid, 0, ISIS_SYS_ID_LEN + 2);
+ memcpy(lspid, area->isis->sysid, ISIS_SYS_ID_LEN);
+
+ lsp = lsp_search(head, lspid);
+ if (!lsp)
+ continue;
+
+ if (curr_adj->adj_state == ISIS_ADJ_UP
+ && !(lsp->hdr.lsp_bits & LSPBIT_ATT)) {
+ sched_debug(
+ "ISIS (%s): adj going up regenerate lsp-bits",
+ area->area_tag);
+ lsp_regenerate_schedule(area, IS_LEVEL_1, 0);
+ } else if (curr_adj->adj_state == ISIS_ADJ_DOWN
+ && lsp->hdr.lsp_bits & LSPBIT_ATT
+ && !isis_level2_adj_up(area)) {
+ sched_debug(
+ "ISIS (%s): adj going down regenerate lsp-bits",
+ area->area_tag);
+ lsp_regenerate_schedule(area, IS_LEVEL_1, 0);
+ }
+ }
+}
+
+static uint8_t lsp_bits_generate(int level, int overload_bit, int attached_bit,
+ struct isis_area *area)
{
uint8_t lsp_bits = 0;
if (level == IS_LEVEL_1)
@@ -408,8 +499,13 @@ static uint8_t lsp_bits_generate(int level, int overload_bit, int attached_bit)
lsp_bits = IS_LEVEL_1_AND_2;
if (overload_bit)
lsp_bits |= overload_bit;
- if (attached_bit)
- lsp_bits |= attached_bit;
+
+ /* only set the attach bit if we are a level-1-2 router and this is
+ * a level-1 LSP and we have a level-2 adjacency up from another area
+ */
+ if (area->is_type == IS_LEVEL_1_AND_2 && level == IS_LEVEL_1
+ && attached_bit && isis_level2_adj_up(area))
+ lsp_bits |= LSPBIT_ATT;
return lsp_bits;
}
@@ -632,13 +728,13 @@ static const char *lsp_bits2string(uint8_t lsp_bits, char *buf, size_t buf_size)
return " error";
/* we only focus on the default metric */
- pos += sprintf(pos, "%d/",
- ISIS_MASK_LSP_ATT_DEFAULT_BIT(lsp_bits) ? 1 : 0);
+ pos += snprintf(pos, buf_size, "%d/",
+ ISIS_MASK_LSP_ATT_BITS(lsp_bits) ? 1 : 0);
- pos += sprintf(pos, "%d/",
- ISIS_MASK_LSP_PARTITION_BIT(lsp_bits) ? 1 : 0);
+ pos += snprintf(pos, buf_size, "%d/",
+ ISIS_MASK_LSP_PARTITION_BIT(lsp_bits) ? 1 : 0);
- sprintf(pos, "%d", ISIS_MASK_LSP_OL_BIT(lsp_bits) ? 1 : 0);
+ snprintf(pos, buf_size, "%d", ISIS_MASK_LSP_OL_BIT(lsp_bits) ? 1 : 0);
return buf;
}
@@ -838,7 +934,7 @@ static struct isis_lsp *lsp_next_frag(uint8_t frag_num, struct isis_lsp *lsp0,
lsp = lsp_new(area, frag_id, lsp0->hdr.rem_lifetime, 0,
lsp_bits_generate(level, area->overload_bit,
- area->attached_bit),
+ area->attached_bit_send, area),
0, lsp0, level);
lsp->own_lsp = 1;
lsp_insert(&area->lspdb[level - 1], lsp);
@@ -864,7 +960,7 @@ static void lsp_build(struct isis_lsp *lsp, struct isis_area *area)
area->area_tag, level);
lsp->hdr.lsp_bits = lsp_bits_generate(level, area->overload_bit,
- area->attached_bit);
+ area->attached_bit_send, area);
lsp_add_auth(lsp);
@@ -1223,10 +1319,10 @@ int lsp_generate(struct isis_area *area, int level)
oldlsp->hdr.lsp_id);
}
rem_lifetime = lsp_rem_lifetime(area, level);
- newlsp =
- lsp_new(area, lspid, rem_lifetime, seq_num,
- area->is_type | area->overload_bit | area->attached_bit,
- 0, NULL, level);
+ newlsp = lsp_new(area, lspid, rem_lifetime, seq_num,
+ lsp_bits_generate(area->is_type, area->overload_bit,
+ area->attached_bit_send, area),
+ 0, NULL, level);
newlsp->area = area;
newlsp->own_lsp = 1;
@@ -1310,8 +1406,9 @@ static int lsp_regenerate(struct isis_area *area, int level)
continue;
}
- frag->hdr.lsp_bits = lsp_bits_generate(
- level, area->overload_bit, area->attached_bit);
+ frag->hdr.lsp_bits =
+ lsp_bits_generate(level, area->overload_bit,
+ area->attached_bit_send, area);
/* Set the lifetime values of all the fragments to the same
* value,
* so that no fragment expires before the lsp is refreshed.
@@ -1518,8 +1615,8 @@ static void lsp_build_pseudo(struct isis_lsp *lsp, struct isis_circuit *circuit,
lsp->level = level;
/* RFC3787 section 4 SHOULD not set overload bit in pseudo LSPs */
- lsp->hdr.lsp_bits =
- lsp_bits_generate(level, 0, circuit->area->attached_bit);
+ lsp->hdr.lsp_bits = lsp_bits_generate(
+ level, 0, circuit->area->attached_bit_send, area);
/*
* add self to IS neighbours
@@ -1617,8 +1714,10 @@ int lsp_generate_pseudo(struct isis_circuit *circuit, int level)
rem_lifetime = lsp_rem_lifetime(circuit->area, level);
/* RFC3787 section 4 SHOULD not set overload bit in pseudo LSPs */
lsp = lsp_new(circuit->area, lsp_id, rem_lifetime, 1,
- circuit->area->is_type | circuit->area->attached_bit, 0,
- NULL, level);
+ lsp_bits_generate(circuit->area->is_type, 0,
+ circuit->area->attached_bit_send,
+ circuit->area),
+ 0, NULL, level);
lsp->area = circuit->area;
lsp_build_pseudo(lsp, circuit, level);
@@ -2036,6 +2135,12 @@ void _lsp_flood(struct isis_lsp *lsp, struct isis_circuit *circuit,
static int lsp_handle_adj_state_change(struct isis_adjacency *adj)
{
lsp_regenerate_schedule(adj->circuit->area, IS_LEVEL_1 | IS_LEVEL_2, 0);
+
+ /* when an adjacency state changes determine if we need to
+ * change attach_bits in other area's LSPs
+ */
+ isis_reset_attach_bit(adj);
+
return 0;
}
diff --git a/isisd/isis_nb.c b/isisd/isis_nb.c
index a02e6a45b1..6d46e6b67e 100644
--- a/isisd/isis_nb.c
+++ b/isisd/isis_nb.c
@@ -60,9 +60,22 @@ const struct frr_yang_module_info frr_isisd_info = {
},
},
{
+ .xpath = "/frr-isisd:isis/instance/attach-send",
+ .cbs = {
+ .cli_show = cli_show_isis_attached_send,
+ .modify = isis_instance_attached_send_modify,
+ },
+ },
+ {
+ .xpath = "/frr-isisd:isis/instance/attach-receive-ignore",
+ .cbs = {
+ .cli_show = cli_show_isis_attached_receive,
+ .modify = isis_instance_attached_receive_modify,
+ },
+ },
+ {
.xpath = "/frr-isisd:isis/instance/attached",
.cbs = {
- .cli_show = cli_show_isis_attached,
.modify = isis_instance_attached_modify,
},
},
diff --git a/isisd/isis_nb.h b/isisd/isis_nb.h
index 679bc6345d..8ecd8134e6 100644
--- a/isisd/isis_nb.h
+++ b/isisd/isis_nb.h
@@ -34,6 +34,8 @@ int isis_instance_is_type_modify(struct nb_cb_modify_args *args);
int isis_instance_area_address_create(struct nb_cb_create_args *args);
int isis_instance_area_address_destroy(struct nb_cb_destroy_args *args);
int isis_instance_dynamic_hostname_modify(struct nb_cb_modify_args *args);
+int isis_instance_attached_send_modify(struct nb_cb_modify_args *args);
+int isis_instance_attached_receive_modify(struct nb_cb_modify_args *args);
int isis_instance_attached_modify(struct nb_cb_modify_args *args);
int isis_instance_overload_modify(struct nb_cb_modify_args *args);
int isis_instance_metric_style_modify(struct nb_cb_modify_args *args);
@@ -424,8 +426,10 @@ void cli_show_isis_is_type(struct vty *vty, struct lyd_node *dnode,
bool show_defaults);
void cli_show_isis_dynamic_hostname(struct vty *vty, struct lyd_node *dnode,
bool show_defaults);
-void cli_show_isis_attached(struct vty *vty, struct lyd_node *dnode,
- bool show_defaults);
+void cli_show_isis_attached_send(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults);
+void cli_show_isis_attached_receive(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults);
void cli_show_isis_overload(struct vty *vty, struct lyd_node *dnode,
bool show_defaults);
void cli_show_isis_metric_style(struct vty *vty, struct lyd_node *dnode,
diff --git a/isisd/isis_nb_config.c b/isisd/isis_nb_config.c
index ed0fea8824..45bbc9737b 100644
--- a/isisd/isis_nb_config.c
+++ b/isisd/isis_nb_config.c
@@ -272,9 +272,9 @@ int isis_instance_dynamic_hostname_modify(struct nb_cb_modify_args *args)
}
/*
- * XPath: /frr-isisd:isis/instance/attached
+ * XPath: /frr-isisd:isis/instance/attach-send
*/
-int isis_instance_attached_modify(struct nb_cb_modify_args *args)
+int isis_instance_attached_send_modify(struct nb_cb_modify_args *args)
{
struct isis_area *area;
bool attached;
@@ -284,12 +284,38 @@ int isis_instance_attached_modify(struct nb_cb_modify_args *args)
area = nb_running_get_entry(args->dnode, NULL, true);
attached = yang_dnode_get_bool(args->dnode, NULL);
- isis_area_attached_bit_set(area, attached);
+ isis_area_attached_bit_send_set(area, attached);
return NB_OK;
}
/*
+ * XPath: /frr-isisd:isis/instance/attach-receive-ignore
+ */
+int isis_instance_attached_receive_modify(struct nb_cb_modify_args *args)
+{
+ struct isis_area *area;
+ bool attached;
+
+ if (args->event != NB_EV_APPLY)
+ return NB_OK;
+
+ area = nb_running_get_entry(args->dnode, NULL, true);
+ attached = yang_dnode_get_bool(args->dnode, NULL);
+ isis_area_attached_bit_receive_set(area, attached);
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-isisd:isis/instance/attached
+ */
+int isis_instance_attached_modify(struct nb_cb_modify_args *args)
+{
+ return NB_OK;
+}
+
+/*
* XPath: /frr-isisd:isis/instance/overload
*/
int isis_instance_overload_modify(struct nb_cb_modify_args *args)
diff --git a/isisd/isis_pdu.c b/isisd/isis_pdu.c
index 72de5d6543..a02b48157f 100644
--- a/isisd/isis_pdu.c
+++ b/isisd/isis_pdu.c
@@ -167,7 +167,7 @@ static int process_p2p_hello(struct iih_info *iih)
if (adj) {
if (memcmp(iih->sys_id, adj->sysid, ISIS_SYS_ID_LEN)) {
zlog_debug(
- "hello source and adjacency do not match, set adj down\n");
+ "hello source and adjacency do not match, set adj down");
isis_adj_state_change(&adj, ISIS_ADJ_DOWN,
"adj do not exist");
return ISIS_OK;
@@ -729,8 +729,8 @@ static int process_hello(uint8_t pdu_type, struct isis_circuit *circuit,
if (!memcmp(iih.sys_id, circuit->isis->sysid, ISIS_SYS_ID_LEN)) {
zlog_warn(
- "ISIS-Adj (%s): Received IIH with own sysid - discard",
- circuit->area->area_tag);
+ "ISIS-Adj (%s): Received IIH with own sysid on %s - discard",
+ circuit->area->area_tag, circuit->interface->name);
circuit->rej_adjacencies++;
#ifndef FABRICD
isis_notif_reject_adjacency(
diff --git a/isisd/isis_spf.c b/isisd/isis_spf.c
index dee082fce1..22dfee994f 100644
--- a/isisd/isis_spf.c
+++ b/isisd/isis_spf.c
@@ -1046,6 +1046,34 @@ lspfragloop:
}
end:
+
+ /* if attach bit set in LSP, attached-bit receive ignore is
+ * not configured, we are a level-1 area and we have no other
+ * level-2 | level1-2 areas then add a default route toward
+ * this neighbor
+ */
+ if ((lsp->hdr.lsp_bits & LSPBIT_ATT) == LSPBIT_ATT
+ && !spftree->area->attached_bit_rcv_ignore
+ && spftree->area->is_type == IS_LEVEL_1
+ && !isis_area_count(spftree->area->isis, IS_LEVEL_2)) {
+ struct prefix_pair ip_info = { {0} };
+ if (IS_DEBUG_RTE_EVENTS)
+ zlog_debug("ISIS-Spf (%s): add default %s route",
+ rawlspid_print(lsp->hdr.lsp_id),
+ spftree->family == AF_INET ? "ipv4"
+ : "ipv6");
+
+ if (spftree->family == AF_INET) {
+ ip_info.dest.family = AF_INET;
+ vtype = VTYPE_IPREACH_INTERNAL;
+ } else {
+ ip_info.dest.family = AF_INET6;
+ vtype = VTYPE_IP6REACH_INTERNAL;
+ }
+ process_N(spftree, vtype, &ip_info, cost, depth + 1, NULL,
+ parent);
+ }
+
if (fragnode == NULL)
fragnode = listhead(lsp->lspu.frags);
else
diff --git a/isisd/isisd.c b/isisd/isisd.c
index eabebab4e0..a802bac13b 100644
--- a/isisd/isisd.c
+++ b/isisd/isisd.c
@@ -316,6 +316,11 @@ struct isis_area *isis_area_create(const char *area_tag, const char *vrf_name)
"/frr-isisd:isis/instance/fast-reroute/level-1/lfa/load-sharing");
area->lfa_load_sharing[1] = yang_get_default_bool(
"/frr-isisd:isis/instance/fast-reroute/level-2/lfa/load-sharing");
+ area->attached_bit_send =
+ yang_get_default_bool("/frr-isisd:isis/instance/attach-send");
+ area->attached_bit_rcv_ignore = yang_get_default_bool(
+ "/frr-isisd:isis/instance/attach-receive-ignore");
+
#else
area->max_lsp_lifetime[0] = DEFAULT_LSP_LIFETIME; /* 1200 */
area->max_lsp_lifetime[1] = DEFAULT_LSP_LIFETIME; /* 1200 */
@@ -332,6 +337,8 @@ struct isis_area *isis_area_create(const char *area_tag, const char *vrf_name)
area->lsp_mtu = DEFAULT_LSP_MTU;
area->lfa_load_sharing[0] = true;
area->lfa_load_sharing[1] = true;
+ area->attached_bit_send = true;
+ area->attached_bit_rcv_ignore = false;
#endif /* ifndef FABRICD */
area->lfa_priority_limit[0] = SPF_PREFIX_PRIO_LOW;
area->lfa_priority_limit[1] = SPF_PREFIX_PRIO_LOW;
@@ -417,6 +424,22 @@ int isis_area_get(struct vty *vty, const char *area_tag)
return CMD_SUCCESS;
}
+/* return the number of Level1 and level-1-2 routers or
+ * the number of Level2 and level-1-2 routers configured
+ */
+int isis_area_count(const struct isis *isis, int levels)
+{
+ struct isis_area *area;
+ struct listnode *node;
+ int count = 0;
+
+ for (ALL_LIST_ELEMENTS_RO(isis->area_list, node, area))
+ if (area->is_type & levels)
+ count++;
+
+ return count;
+}
+
void isis_area_destroy(struct isis_area *area)
{
struct listnode *node, *nnode;
@@ -2547,12 +2570,21 @@ void isis_area_overload_bit_set(struct isis_area *area, bool overload_bit)
#endif /* ifndef FABRICD */
}
-void isis_area_attached_bit_set(struct isis_area *area, bool attached_bit)
+void isis_area_attached_bit_send_set(struct isis_area *area, bool attached_bit)
+{
+
+ if (attached_bit != area->attached_bit_send) {
+ area->attached_bit_send = attached_bit;
+ lsp_regenerate_schedule(area, IS_LEVEL_1 | IS_LEVEL_2, 1);
+ }
+}
+
+void isis_area_attached_bit_receive_set(struct isis_area *area,
+ bool attached_bit)
{
- char new_attached_bit = attached_bit ? LSPBIT_ATT : 0;
- if (new_attached_bit != area->attached_bit) {
- area->attached_bit = new_attached_bit;
+ if (attached_bit != area->attached_bit_rcv_ignore) {
+ area->attached_bit_rcv_ignore = attached_bit;
lsp_regenerate_schedule(area, IS_LEVEL_1 | IS_LEVEL_2, 1);
}
}
diff --git a/isisd/isisd.h b/isisd/isisd.h
index 9b903eed48..22d9c6236d 100644
--- a/isisd/isisd.h
+++ b/isisd/isisd.h
@@ -169,7 +169,8 @@ struct isis_area {
/* are we overloaded? */
char overload_bit;
/* L1/L2 router identifier for inter-area traffic */
- char attached_bit;
+ char attached_bit_send;
+ char attached_bit_rcv_ignore;
uint16_t lsp_refresh[ISIS_LEVELS];
/* minimum time allowed before lsp retransmission */
uint16_t lsp_gen_interval[ISIS_LEVELS];
@@ -242,6 +243,7 @@ struct isis_area *isis_area_lookup(const char *, vrf_id_t vrf_id);
struct isis_area *isis_area_lookup_by_vrf(const char *area_tag,
const char *vrf_name);
int isis_area_get(struct vty *vty, const char *area_tag);
+int isis_area_count(const struct isis *isis, int levels);
void isis_area_destroy(struct isis_area *area);
void isis_filter_update(struct access_list *access);
void isis_prefix_list_update(struct prefix_list *plist);
@@ -253,7 +255,9 @@ void isis_area_invalidate_routes(struct isis_area *area, int levels);
void isis_area_verify_routes(struct isis_area *area);
void isis_area_overload_bit_set(struct isis_area *area, bool overload_bit);
-void isis_area_attached_bit_set(struct isis_area *area, bool attached_bit);
+void isis_area_attached_bit_send_set(struct isis_area *area, bool attached_bit);
+void isis_area_attached_bit_receive_set(struct isis_area *area,
+ bool attached_bit);
void isis_area_dynhostname_set(struct isis_area *area, bool dynhostname);
void isis_area_metricstyle_set(struct isis_area *area, bool old_metric,
bool new_metric);
diff --git a/lib/agentx.c b/lib/agentx.c
index 603d8d6172..5351f8bda2 100644
--- a/lib/agentx.c
+++ b/lib/agentx.c
@@ -32,6 +32,9 @@
#include "linklist.h"
#include "version.h"
#include "lib_errors.h"
+#include "xref.h"
+
+XREF_SETUP()
static int agentx_enabled = 0;
@@ -262,11 +265,28 @@ void smux_register_mib(const char *descr, struct variable *var, size_t width,
register_mib(descr, var, width, num, name, namelen);
}
-int smux_trap(struct variable *vp, size_t vp_len, const oid *ename,
- size_t enamelen, const oid *name, size_t namelen,
- const oid *iname, size_t inamelen,
- const struct trap_object *trapobj, size_t trapobjlen,
- uint8_t sptrap)
+void smux_trap(struct variable *vp, size_t vp_len, const oid *ename,
+ size_t enamelen, const oid *name, size_t namelen,
+ const oid *iname, size_t inamelen,
+ const struct trap_object *trapobj, size_t trapobjlen,
+ uint8_t sptrap)
+{
+ struct index_oid trap_index[1];
+
+ /* copy the single index into the multi-index format */
+ oid_copy(trap_index[0].indexname, iname, inamelen);
+ trap_index[0].indexlen = inamelen;
+
+ smux_trap_multi_index(vp, vp_len, ename, enamelen, name, namelen,
+ trap_index, array_size(trap_index), trapobj,
+ trapobjlen, sptrap);
+}
+
+int smux_trap_multi_index(struct variable *vp, size_t vp_len, const oid *ename,
+ size_t enamelen, const oid *name, size_t namelen,
+ struct index_oid *iname, size_t index_len,
+ const struct trap_object *trapobj, size_t trapobjlen,
+ uint8_t sptrap)
{
oid objid_snmptrap[] = {1, 3, 6, 1, 6, 3, 1, 1, 4, 1, 0};
size_t objid_snmptrap_len = sizeof(objid_snmptrap) / sizeof(oid);
@@ -296,6 +316,13 @@ int smux_trap(struct variable *vp, size_t vp_len, const oid *ename,
size_t val_len;
WriteMethod *wm = NULL;
struct variable cvp;
+ unsigned int iindex;
+ /*
+ * this allows the behaviour of smux_trap with a single index
+ * for all objects to be maintained whilst allowing traps which
+ * have different indices per object to be supported
+ */
+ iindex = (index_len == 1) ? 0 : i;
/* Make OID. */
if (trapobj[i].namelen > 0) {
@@ -303,8 +330,10 @@ int smux_trap(struct variable *vp, size_t vp_len, const oid *ename,
onamelen = trapobj[i].namelen;
oid_copy(oid, name, namelen);
oid_copy(oid + namelen, trapobj[i].name, onamelen);
- oid_copy(oid + namelen + onamelen, iname, inamelen);
- oid_len = namelen + onamelen + inamelen;
+ oid_copy(oid + namelen + onamelen,
+ iname[iindex].indexname,
+ iname[iindex].indexlen);
+ oid_len = namelen + onamelen + iname[iindex].indexlen;
} else {
/* Scalar object */
onamelen = trapobj[i].namelen * (-1);
@@ -330,6 +359,7 @@ int smux_trap(struct variable *vp, size_t vp_len, const oid *ename,
cvp.magic = vp[j].magic;
cvp.acl = vp[j].acl;
cvp.findVar = vp[j].findVar;
+
/* Grab the result. */
val = cvp.findVar(&cvp, oid, &oid_len, 1, &val_len,
&wm);
diff --git a/lib/bfd.c b/lib/bfd.c
index cdf7008601..3ab0e21af5 100644
--- a/lib/bfd.c
+++ b/lib/bfd.c
@@ -224,6 +224,17 @@ struct interface *bfd_get_peer_info(struct stream *s, struct prefix *dp,
int plen;
int local_remote_cbit;
+ /*
+ * If the ifindex lookup fails the
+ * rest of the data in the stream is
+ * not read. All examples of this function
+ * call immediately use the dp->family which
+ * is not good. Ensure we are not using
+ * random data
+ */
+ memset(dp, 0, sizeof(*dp));
+ memset(sp, 0, sizeof(*sp));
+
/* Get interface index. */
ifindex = stream_getl(s);
@@ -249,13 +260,12 @@ struct interface *bfd_get_peer_info(struct stream *s, struct prefix *dp,
/* Get BFD status. */
*status = stream_getl(s);
- if (sp) {
- sp->family = stream_getc(s);
+ sp->family = stream_getc(s);
+
+ plen = prefix_blen(sp);
+ stream_get(&sp->u.prefix, s, plen);
+ sp->prefixlen = stream_getc(s);
- plen = prefix_blen(sp);
- stream_get(&sp->u.prefix, s, plen);
- sp->prefixlen = stream_getc(s);
- }
local_remote_cbit = stream_getc(s);
if (remote_cbit)
*remote_cbit = local_remote_cbit;
diff --git a/lib/buffer.c b/lib/buffer.c
index 459d98e75d..42796faae8 100644
--- a/lib/buffer.c
+++ b/lib/buffer.c
@@ -468,16 +468,6 @@ buffer_status_t buffer_write(struct buffer *b, int fd, const void *p,
{
ssize_t nbytes;
-#if 0
- /*
- * Should we attempt to drain any previously buffered data?
- * This could help reduce latency in pushing out the data if
- * we are stuck in a long-running thread that is preventing
- * the main select loop from calling the flush thread...
- */
- if (b->head && (buffer_flush_available(b, fd) == BUFFER_ERROR))
- return BUFFER_ERROR;
-#endif
if (b->head)
/* Buffer is not empty, so do not attempt to write the new data.
*/
diff --git a/lib/clippy.c b/lib/clippy.c
index 2e09c24c66..15cd9d7a4b 100644
--- a/lib/clippy.c
+++ b/lib/clippy.c
@@ -107,7 +107,8 @@ int main(int argc, char **argv)
#include "log.h"
#include "zassert.h"
-void vzlog(int prio, const char *format, va_list args)
+void vzlogx(const struct xref_logmsg *xref, int prio,
+ const char *format, va_list args)
{
vfprintf(stderr, format, args);
fputs("\n", stderr);
diff --git a/lib/command.c b/lib/command.c
index f40fe6e2c5..6a4d504b2f 100644
--- a/lib/command.c
+++ b/lib/command.c
@@ -49,6 +49,8 @@
#include "northbound_cli.h"
#include "network.h"
+#include "frrscript.h"
+
DEFINE_MTYPE_STATIC(LIB, HOST, "Host config")
DEFINE_MTYPE(LIB, COMPLETION, "Completion item")
@@ -275,7 +277,7 @@ const char *cmd_prompt(enum node_type node)
}
/* Install a command into a node. */
-void install_element(enum node_type ntype, const struct cmd_element *cmd)
+void _install_element(enum node_type ntype, const struct cmd_element *cmd)
{
struct cmd_node *cnode;
@@ -321,7 +323,7 @@ void install_element(enum node_type ntype, const struct cmd_element *cmd)
vector_set(cnode->cmd_vector, (void *)cmd);
if (ntype == VIEW_NODE)
- install_element(ENABLE_NODE, cmd);
+ _install_element(ENABLE_NODE, cmd);
}
void uninstall_element(enum node_type ntype, const struct cmd_element *cmd)
@@ -2303,6 +2305,31 @@ done:
return CMD_SUCCESS;
}
+#if defined(DEV_BUILD) && defined(HAVE_SCRIPTING)
+DEFUN(script,
+ script_cmd,
+ "script SCRIPT",
+ "Test command - execute a script\n"
+ "Script name (same as filename in /etc/frr/scripts/\n")
+{
+ struct prefix p;
+
+ (void)str2prefix("1.2.3.4/24", &p);
+
+ struct frrscript *fs = frrscript_load(argv[1]->arg, NULL);
+
+ if (fs == NULL) {
+ vty_out(vty, "Script '/etc/frr/scripts/%s.lua' not found\n",
+ argv[1]->arg);
+ } else {
+ int ret = frrscript_call(fs, NULL);
+ vty_out(vty, "Script result: %d\n", ret);
+ }
+
+ return CMD_SUCCESS;
+}
+#endif
+
/* Set config filename. Called from vty.c */
void host_config_set(const char *filename)
{
@@ -2317,18 +2344,18 @@ const char *host_config_get(void)
void install_default(enum node_type node)
{
- install_element(node, &config_exit_cmd);
- install_element(node, &config_quit_cmd);
- install_element(node, &config_end_cmd);
- install_element(node, &config_help_cmd);
- install_element(node, &config_list_cmd);
- install_element(node, &show_cli_graph_cmd);
- install_element(node, &find_cmd);
+ _install_element(node, &config_exit_cmd);
+ _install_element(node, &config_quit_cmd);
+ _install_element(node, &config_end_cmd);
+ _install_element(node, &config_help_cmd);
+ _install_element(node, &config_list_cmd);
+ _install_element(node, &show_cli_graph_cmd);
+ _install_element(node, &find_cmd);
- install_element(node, &config_write_cmd);
- install_element(node, &show_running_config_cmd);
+ _install_element(node, &config_write_cmd);
+ _install_element(node, &show_running_config_cmd);
- install_element(node, &autocomplete_cmd);
+ _install_element(node, &autocomplete_cmd);
nb_cli_install_default(node);
}
@@ -2397,6 +2424,10 @@ void cmd_init(int terminal)
install_element(VIEW_NODE, &echo_cmd);
install_element(VIEW_NODE, &autocomplete_cmd);
install_element(VIEW_NODE, &find_cmd);
+#if defined(DEV_BUILD) && defined(HAVE_SCRIPTING)
+ install_element(VIEW_NODE, &script_cmd);
+#endif
+
install_element(ENABLE_NODE, &config_end_cmd);
install_element(ENABLE_NODE, &config_disable_cmd);
diff --git a/lib/command.h b/lib/command.h
index bfe64a7235..71abb20b05 100644
--- a/lib/command.h
+++ b/lib/command.h
@@ -239,7 +239,11 @@ struct cmd_node {
.attr = attrs, \
.daemon = dnum, \
.name = #cmdname, \
- };
+ .xref = XREF_INIT(XREFT_DEFUN, NULL, #funcname), \
+ }; \
+ XREF_LINK(cmdname.xref); \
+ /* end */
+
#define DEFUN_CMD_FUNC_DECL(funcname) \
static int funcname(const struct cmd_element *, struct vty *, int, \
@@ -414,7 +418,8 @@ struct cmd_node {
"<neighbor|interface|area|lsa|zebra|config|dbex|spf|route|lsdb|redistribute|hook|asbr|prefix|abr>"
#define AREA_TAG_STR "[area tag]\n"
#define COMMUNITY_AANN_STR "Community number where AA and NN are (0-65535)\n"
-#define COMMUNITY_VAL_STR "Community number in AA:NN format (where AA and NN are (0-65535)) or local-AS|no-advertise|no-export|internet or additive\n"
+#define COMMUNITY_VAL_STR \
+ "Community number in AA:NN format (where AA and NN are (0-65535)) or local-AS|no-advertise|no-export|internet|graceful-shutdown|accept-own-nexthop|accept-own|route-filter-translated-v4|route-filter-v4|route-filter-translated-v6|route-filter-v6|llgr-stale|no-llgr|blackhole|no-peer or additive\n"
#define MPLS_TE_STR "MPLS-TE specific commands\n"
#define LINK_PARAMS_STR "Configure interface link parameters\n"
#define OSPF_RI_STR "OSPF Router Information specific commands\n"
@@ -483,7 +488,29 @@ struct cmd_node {
/* Prototypes. */
extern void install_node(struct cmd_node *node);
extern void install_default(enum node_type);
-extern void install_element(enum node_type, const struct cmd_element *);
+
+struct xref_install_element {
+ struct xref xref;
+
+ const struct cmd_element *cmd_element;
+ enum node_type node_type;
+};
+
+#ifndef VTYSH_EXTRACT_PL
+#define install_element(node_type_, cmd_element_) do { \
+ static const struct xref_install_element _xref \
+ __attribute__((used)) = { \
+ .xref = XREF_INIT(XREFT_INSTALL_ELEMENT, NULL, \
+ __func__), \
+ .cmd_element = cmd_element_, \
+ .node_type = node_type_, \
+ }; \
+ XREF_LINK(_xref.xref); \
+ _install_element(node_type_, cmd_element_); \
+ } while (0)
+#endif
+
+extern void _install_element(enum node_type, const struct cmd_element *);
/* known issue with uninstall_element: changes to cmd_token->attr (i.e.
* deprecated/hidden) are not reversed. */
diff --git a/lib/command_graph.h b/lib/command_graph.h
index 179e104a57..09824460e6 100644
--- a/lib/command_graph.h
+++ b/lib/command_graph.h
@@ -31,6 +31,7 @@
#include "memory.h"
#include "vector.h"
#include "graph.h"
+#include "xref.h"
#ifdef __cplusplus
extern "C" {
@@ -105,6 +106,7 @@ struct cmd_element {
struct cmd_token *[]);
const char *name; /* symbol name for debugging */
+ struct xref xref;
};
/* text for <cr> command */
diff --git a/lib/command_parse.y b/lib/command_parse.y
index ba5225b702..8135d02b4b 100644
--- a/lib/command_parse.y
+++ b/lib/command_parse.y
@@ -496,7 +496,7 @@ terminate_graph (CMD_YYLTYPE *locp, struct parser_ctx *ctx,
zlog_err ("----------");
while (ctx->docstr && ctx->docstr[1] != '\0')
zlog_err ("%s", strsep(&ctx->docstr, "\n"));
- zlog_err ("----------\n");
+ zlog_err ("----------");
}
graph_add_edge (finalnode, end_token_node);
diff --git a/lib/compiler.h b/lib/compiler.h
index 217a60d888..70ef8e9bc8 100644
--- a/lib/compiler.h
+++ b/lib/compiler.h
@@ -279,6 +279,29 @@ extern "C" {
#define array_size(ar) (sizeof(ar) / sizeof(ar[0]))
+/* Some insane macros to count number of varargs to a functionlike macro */
+#define PP_ARG_N( \
+ _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, \
+ _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, \
+ _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, \
+ _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, \
+ _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, \
+ _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, \
+ _61, _62, _63, N, ...) N
+
+#define PP_RSEQ_N() \
+ 62, 61, 60, \
+ 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, \
+ 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, \
+ 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, \
+ 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, \
+ 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, \
+ 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+#define PP_NARG_(...) PP_ARG_N(__VA_ARGS__)
+#define PP_NARG(...) PP_NARG_(_, ##__VA_ARGS__, PP_RSEQ_N())
+
+
/* sigh. this is so ugly, it overflows and wraps to being nice again.
*
* printfrr() supports "%Ld" for <int64_t>, whatever that is typedef'd to.
diff --git a/lib/filter_cli.c b/lib/filter_cli.c
index 54b6cda9a5..5d66a9fc73 100644
--- a/lib/filter_cli.c
+++ b/lib/filter_cli.c
@@ -259,7 +259,7 @@ DEFPY_YANG(
/* Access-list must exist before entries. */
if (yang_dnode_exists(running_config->dnode, xpath) == false)
- return CMD_WARNING;
+ return CMD_WARNING_CONFIG_FAILED;
/* Use access-list data structure to fetch sequence. */
dnode = yang_dnode_get(running_config->dnode, xpath);
@@ -268,7 +268,7 @@ DEFPY_YANG(
mask_str ? mask_str : CISCO_HOST_WILDCARD_MASK,
NULL, NULL);
if (sseq == -1)
- return CMD_WARNING;
+ return CMD_WARNING_CONFIG_FAILED;
snprintfrr(xpath_entry, sizeof(xpath_entry),
"%s/entry[sequence='%" PRId64 "']", xpath, sseq);
@@ -436,7 +436,7 @@ DEFPY_YANG(
/* Access-list must exist before entries. */
if (yang_dnode_exists(running_config->dnode, xpath) == false)
- return CMD_WARNING;
+ return CMD_WARNING_CONFIG_FAILED;
/* Use access-list data structure to fetch sequence. */
dnode = yang_dnode_get(running_config->dnode, xpath);
@@ -469,7 +469,7 @@ DEFPY_YANG(
"0.0.0.0", CISCO_ANY_WILDCARD_MASK);
}
if (sseq == -1)
- return CMD_WARNING;
+ return CMD_WARNING_CONFIG_FAILED;
snprintfrr(xpath_entry, sizeof(xpath_entry),
"%s/entry[sequence='%" PRId64 "']", xpath, sseq);
@@ -588,7 +588,7 @@ DEFPY_YANG(
/* Access-list must exist before entries. */
if (yang_dnode_exists(running_config->dnode, xpath) == false)
- return CMD_WARNING;
+ return CMD_WARNING_CONFIG_FAILED;
/* Use access-list data structure to fetch sequence. */
dnode = yang_dnode_get(running_config->dnode, xpath);
@@ -601,7 +601,7 @@ DEFPY_YANG(
sseq = acl_zebra_get_seq(acl, action, (struct prefix *)prefix,
exact);
if (sseq == -1)
- return CMD_WARNING;
+ return CMD_WARNING_CONFIG_FAILED;
snprintfrr(xpath_entry, sizeof(xpath_entry),
"%s/entry[sequence='%" PRId64 "']", xpath, sseq);
@@ -786,7 +786,7 @@ DEFPY_YANG(
/* Access-list must exist before entries. */
if (yang_dnode_exists(running_config->dnode, xpath) == false)
- return CMD_WARNING;
+ return CMD_WARNING_CONFIG_FAILED;
/* Use access-list data structure to fetch sequence. */
dnode = yang_dnode_get(running_config->dnode, xpath);
@@ -799,7 +799,7 @@ DEFPY_YANG(
sseq = acl_zebra_get_seq(acl, action, (struct prefix *)prefix,
exact);
if (sseq == -1)
- return CMD_WARNING;
+ return CMD_WARNING_CONFIG_FAILED;
snprintfrr(xpath_entry, sizeof(xpath_entry),
"%s/entry[sequence='%" PRId64 "']", xpath, sseq);
@@ -979,7 +979,7 @@ DEFPY_YANG(
/* Access-list must exist before entries. */
if (yang_dnode_exists(running_config->dnode, xpath) == false)
- return CMD_WARNING;
+ return CMD_WARNING_CONFIG_FAILED;
/* Use access-list data structure to fetch sequence. */
dnode = yang_dnode_get(running_config->dnode, xpath);
@@ -992,7 +992,7 @@ DEFPY_YANG(
sseq = acl_zebra_get_seq(acl, action, (struct prefix *)prefix,
false);
if (sseq == -1)
- return CMD_WARNING;
+ return CMD_WARNING_CONFIG_FAILED;
snprintfrr(xpath_entry, sizeof(xpath_entry),
"%s/entry[sequence='%" PRId64 "']", xpath, sseq);
@@ -1277,7 +1277,7 @@ static int plist_remove(struct vty *vty, const char *iptype, const char *name,
/* Access-list must exist before entries. */
if (yang_dnode_exists(running_config->dnode, xpath) == false)
- return CMD_WARNING;
+ return CMD_WARNING_CONFIG_FAILED;
/* Use access-list data structure to fetch sequence. */
assert(action != NULL);
@@ -1290,7 +1290,7 @@ static int plist_remove(struct vty *vty, const char *iptype, const char *name,
pl = nb_running_get_entry(dnode, NULL, true);
pentry = prefix_list_entry_lookup(pl, p, plt, -1, le, ge);
if (pentry == NULL)
- return CMD_WARNING;
+ return CMD_WARNING_CONFIG_FAILED;
snprintfrr(xpath_entry, sizeof(xpath_entry),
"%s/entry[sequence='%" PRId64 "']", xpath, pentry->seq);
diff --git a/lib/frr_zmq.c b/lib/frr_zmq.c
index cc11d76700..33adcd7b80 100644
--- a/lib/frr_zmq.c
+++ b/lib/frr_zmq.c
@@ -135,9 +135,8 @@ static int frrzmq_read_msg(struct thread *t)
if (read)
frrzmq_check_events(cbp, &cb->write, ZMQ_POLLOUT);
- funcname_thread_add_read_write(
- THREAD_READ, t->master, frrzmq_read_msg, cbp, cb->fd,
- &cb->read.thread, t->funcname, t->schedfrom, t->schedfrom_line);
+ _thread_add_read_write(t->xref, t->master, frrzmq_read_msg, cbp,
+ cb->fd, &cb->read.thread);
return 0;
out_err:
@@ -148,14 +147,14 @@ out_err:
return 1;
}
-int funcname_frrzmq_thread_add_read(struct thread_master *master,
- void (*msgfunc)(void *arg, void *zmqsock),
- void (*partfunc)(void *arg, void *zmqsock,
- zmq_msg_t *msg,
- unsigned partnum),
- void (*errfunc)(void *arg, void *zmqsock),
- void *arg, void *zmqsock,
- struct frrzmq_cb **cbp, debugargdef)
+int _frrzmq_thread_add_read(const struct xref_threadsched *xref,
+ struct thread_master *master,
+ void (*msgfunc)(void *arg, void *zmqsock),
+ void (*partfunc)(void *arg, void *zmqsock,
+ zmq_msg_t *msg, unsigned partnum),
+ void (*errfunc)(void *arg, void *zmqsock),
+ void *arg, void *zmqsock,
+ struct frrzmq_cb **cbp)
{
int fd, events;
size_t len;
@@ -192,13 +191,11 @@ int funcname_frrzmq_thread_add_read(struct thread_master *master,
if (events & ZMQ_POLLIN) {
thread_cancel(&cb->read.thread);
- funcname_thread_add_event(master, frrzmq_read_msg, cbp, fd,
- &cb->read.thread, funcname, schedfrom,
- fromln);
+ _thread_add_event(xref, master, frrzmq_read_msg, cbp, fd,
+ &cb->read.thread);
} else
- funcname_thread_add_read_write(
- THREAD_READ, master, frrzmq_read_msg, cbp, fd,
- &cb->read.thread, funcname, schedfrom, fromln);
+ _thread_add_read_write(xref, master, frrzmq_read_msg, cbp, fd,
+ &cb->read.thread);
return 0;
}
@@ -244,10 +241,8 @@ static int frrzmq_write_msg(struct thread *t)
if (written)
frrzmq_check_events(cbp, &cb->read, ZMQ_POLLIN);
- funcname_thread_add_read_write(THREAD_WRITE, t->master,
- frrzmq_write_msg, cbp, cb->fd,
- &cb->write.thread, t->funcname,
- t->schedfrom, t->schedfrom_line);
+ _thread_add_read_write(t->xref, t->master, frrzmq_write_msg, cbp,
+ cb->fd, &cb->write.thread);
return 0;
out_err:
@@ -257,11 +252,12 @@ out_err:
cb->write.cb_error(cb->write.arg, cb->zmqsock);
return 1;
}
-int funcname_frrzmq_thread_add_write(struct thread_master *master,
- void (*msgfunc)(void *arg, void *zmqsock),
- void (*errfunc)(void *arg, void *zmqsock),
- void *arg, void *zmqsock,
- struct frrzmq_cb **cbp, debugargdef)
+
+int _frrzmq_thread_add_write(const struct xref_threadsched *xref,
+ struct thread_master *master,
+ void (*msgfunc)(void *arg, void *zmqsock),
+ void (*errfunc)(void *arg, void *zmqsock),
+ void *arg, void *zmqsock, struct frrzmq_cb **cbp)
{
int fd, events;
size_t len;
@@ -298,13 +294,11 @@ int funcname_frrzmq_thread_add_write(struct thread_master *master,
if (events & ZMQ_POLLOUT) {
thread_cancel(&cb->write.thread);
- funcname_thread_add_event(master, frrzmq_write_msg, cbp, fd,
- &cb->write.thread, funcname,
- schedfrom, fromln);
+ _thread_add_event(xref, master, frrzmq_write_msg, cbp, fd,
+ &cb->write.thread);
} else
- funcname_thread_add_read_write(
- THREAD_WRITE, master, frrzmq_write_msg, cbp, fd,
- &cb->write.thread, funcname, schedfrom, fromln);
+ _thread_add_read_write(xref, master, frrzmq_write_msg, cbp, fd,
+ &cb->write.thread);
return 0;
}
diff --git a/lib/frr_zmq.h b/lib/frr_zmq.h
index 4303df9ccd..d30cf8a841 100644
--- a/lib/frr_zmq.h
+++ b/lib/frr_zmq.h
@@ -67,18 +67,32 @@ extern void *frrzmq_context;
extern void frrzmq_init(void);
extern void frrzmq_finish(void);
-#define debugargdef const char *funcname, const char *schedfrom, int fromln
+#define _xref_zmq_a(type, f, d, call) \
+ ({ \
+ static const struct xref_threadsched _xref \
+ __attribute__((used)) = { \
+ .xref = XREF_INIT(XREFT_THREADSCHED, NULL, __func__), \
+ .funcname = #f, \
+ .dest = #d, \
+ .thread_type = THREAD_ ## type, \
+ }; \
+ XREF_LINK(_xref.xref); \
+ call; \
+ }) \
+ /* end */
/* core event registration, one of these 2 macros should be used */
#define frrzmq_thread_add_read_msg(m, f, e, a, z, d) \
- funcname_frrzmq_thread_add_read(m, f, NULL, e, a, z, d, #f, __FILE__, \
- __LINE__)
+ _xref_zmq_a(READ, f, d, \
+ _frrzmq_thread_add_read(&_xref, m, f, NULL, e, a, z, d))
+
#define frrzmq_thread_add_read_part(m, f, e, a, z, d) \
- funcname_frrzmq_thread_add_read(m, NULL, f, e, a, z, d, #f, __FILE__, \
- __LINE__)
+ _xref_zmq_a(READ, f, d, \
+ _frrzmq_thread_add_read(&_xref, m, NULL, f, e, a, z, d))
+
#define frrzmq_thread_add_write_msg(m, f, e, a, z, d) \
- funcname_frrzmq_thread_add_write(m, f, e, a, z, d, #f, __FILE__, \
- __LINE__)
+ _xref_zmq_a(WRITE, f, d, \
+ _frrzmq_thread_add_write(&_xref, m, f, e, a, z, d))
struct cb_core;
struct frrzmq_cb;
@@ -104,16 +118,18 @@ struct frrzmq_cb;
* may schedule the event to run as soon as libfrr is back in its main
* loop.
*/
-extern int funcname_frrzmq_thread_add_read(
- struct thread_master *master, void (*msgfunc)(void *arg, void *zmqsock),
+extern int _frrzmq_thread_add_read(
+ const struct xref_threadsched *xref, struct thread_master *master,
+ void (*msgfunc)(void *arg, void *zmqsock),
void (*partfunc)(void *arg, void *zmqsock, zmq_msg_t *msg,
unsigned partnum),
void (*errfunc)(void *arg, void *zmqsock), void *arg, void *zmqsock,
- struct frrzmq_cb **cb, debugargdef);
-extern int funcname_frrzmq_thread_add_write(
- struct thread_master *master, void (*msgfunc)(void *arg, void *zmqsock),
+ struct frrzmq_cb **cb);
+extern int _frrzmq_thread_add_write(
+ const struct xref_threadsched *xref, struct thread_master *master,
+ void (*msgfunc)(void *arg, void *zmqsock),
void (*errfunc)(void *arg, void *zmqsock), void *arg, void *zmqsock,
- struct frrzmq_cb **cb, debugargdef);
+ struct frrzmq_cb **cb);
extern void frrzmq_thread_cancel(struct frrzmq_cb **cb, struct cb_core *core);
diff --git a/lib/frrlua.c b/lib/frrlua.c
index 9f9cf8c1f6..d8aaa3aa3c 100644
--- a/lib/frrlua.c
+++ b/lib/frrlua.c
@@ -2,128 +2,365 @@
* This file defines the lua interface into
* FRRouting.
*
- * Copyright (C) 2016 Cumulus Networks, Inc.
- * Donald Sharp
+ * Copyright (C) 2016-2019 Cumulus Networks, Inc.
+ * Donald Sharp, Quentin Young
*
- * This file is part of FRRouting (FRR).
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
*
- * FRR is free software; you can redistribute it and/or modify it under the
- * terms of the GNU General Public License as published by the Free Software
- * Foundation; either version 2, or (at your option) any later version.
- *
- * FRR is distributed in the hope that it will be useful, but WITHOUT ANY
- * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
- * details.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
*
* You should have received a copy of the GNU General Public License along
- * with FRR; see the file COPYING. If not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <zebra.h>
-#if defined(HAVE_LUA)
+#ifdef HAVE_SCRIPTING
+
#include "prefix.h"
#include "frrlua.h"
#include "log.h"
+#include "buffer.h"
+
+/* Lua stuff */
-static int lua_zlog_debug(lua_State *L)
+/*
+ * FRR convenience functions.
+ *
+ * This section has convenience functions used to make interacting with the Lua
+ * stack easier.
+ */
+
+int frrlua_table_get_integer(lua_State *L, const char *key)
{
- int debug_lua = 1;
- const char *string = lua_tostring(L, 1);
+ int result;
- if (debug_lua)
- zlog_debug("%s", string);
+ lua_pushstring(L, key);
+ lua_gettable(L, -2);
- return 0;
+ result = lua_tointeger(L, -1);
+ lua_pop(L, 1);
+
+ return result;
}
-const char *get_string(lua_State *L, const char *key)
+/*
+ * Encoders.
+ *
+ * This section has functions that convert internal FRR datatypes into Lua
+ * datatypes.
+ */
+
+void lua_pushprefix(lua_State *L, const struct prefix *prefix)
{
- const char *str;
+ char buffer[PREFIX_STRLEN];
- lua_pushstring(L, key);
- lua_gettable(L, -2);
+ lua_newtable(L);
+ lua_pushstring(L, prefix2str(prefix, buffer, PREFIX_STRLEN));
+ lua_setfield(L, -2, "network");
+ lua_pushinteger(L, prefix->prefixlen);
+ lua_setfield(L, -2, "length");
+ lua_pushinteger(L, prefix->family);
+ lua_setfield(L, -2, "family");
+}
- str = (const char *)lua_tostring(L, -1);
+void *lua_toprefix(lua_State *L, int idx)
+{
+ struct prefix *p = XCALLOC(MTYPE_TMP, sizeof(struct prefix));
+
+ lua_getfield(L, idx, "network");
+ (void)str2prefix(lua_tostring(L, -1), p);
lua_pop(L, 1);
- return str;
+ return p;
}
-int get_integer(lua_State *L, const char *key)
+void lua_pushinterface(lua_State *L, const struct interface *ifp)
{
- int result;
+ lua_newtable(L);
+ lua_pushstring(L, ifp->name);
+ lua_setfield(L, -2, "name");
+ lua_pushinteger(L, ifp->ifindex);
+ lua_setfield(L, -2, "ifindex");
+ lua_pushinteger(L, ifp->status);
+ lua_setfield(L, -2, "status");
+ lua_pushinteger(L, ifp->flags);
+ lua_setfield(L, -2, "flags");
+ lua_pushinteger(L, ifp->metric);
+ lua_setfield(L, -2, "metric");
+ lua_pushinteger(L, ifp->speed);
+ lua_setfield(L, -2, "speed");
+ lua_pushinteger(L, ifp->mtu);
+ lua_setfield(L, -2, "mtu");
+ lua_pushinteger(L, ifp->mtu6);
+ lua_setfield(L, -2, "mtu6");
+ lua_pushinteger(L, ifp->bandwidth);
+ lua_setfield(L, -2, "bandwidth");
+ lua_pushinteger(L, ifp->link_ifindex);
+ lua_setfield(L, -2, "link_ifindex");
+ lua_pushinteger(L, ifp->ll_type);
+ lua_setfield(L, -2, "linklayer_type");
+}
- lua_pushstring(L, key);
- lua_gettable(L, -2);
+void *lua_tointerface(lua_State *L, int idx)
+{
+ struct interface *ifp = XCALLOC(MTYPE_TMP, sizeof(struct interface));
- result = lua_tointeger(L, -1);
+ lua_getfield(L, idx, "name");
+ strlcpy(ifp->name, lua_tostring(L, -1), sizeof(ifp->name));
+ lua_pop(L, 1);
+ lua_getfield(L, idx, "ifindex");
+ ifp->ifindex = lua_tointeger(L, -1);
+ lua_pop(L, 1);
+ lua_getfield(L, idx, "status");
+ ifp->status = lua_tointeger(L, -1);
+ lua_pop(L, 1);
+ lua_getfield(L, idx, "flags");
+ ifp->flags = lua_tointeger(L, -1);
+ lua_pop(L, 1);
+ lua_getfield(L, idx, "metric");
+ ifp->metric = lua_tointeger(L, -1);
+ lua_pop(L, 1);
+ lua_getfield(L, idx, "speed");
+ ifp->speed = lua_tointeger(L, -1);
+ lua_pop(L, 1);
+ lua_getfield(L, idx, "mtu");
+ ifp->mtu = lua_tointeger(L, -1);
+ lua_pop(L, 1);
+ lua_getfield(L, idx, "mtu6");
+ ifp->mtu6 = lua_tointeger(L, -1);
+ lua_pop(L, 1);
+ lua_getfield(L, idx, "bandwidth");
+ ifp->bandwidth = lua_tointeger(L, -1);
+ lua_pop(L, 1);
+ lua_getfield(L, idx, "link_ifindex");
+ ifp->link_ifindex = lua_tointeger(L, -1);
+ lua_pop(L, 1);
+ lua_getfield(L, idx, "linklayer_type");
+ ifp->ll_type = lua_tointeger(L, -1);
lua_pop(L, 1);
- return result;
+ return ifp;
}
-static void *lua_alloc(void *ud, void *ptr, size_t osize,
- size_t nsize)
+void lua_pushinaddr(lua_State *L, const struct in_addr *addr)
{
- (void)ud; (void)osize; /* not used */
- if (nsize == 0) {
- free(ptr);
- return NULL;
- } else
- return realloc(ptr, nsize);
+ char buf[INET_ADDRSTRLEN];
+ inet_ntop(AF_INET, addr, buf, sizeof(buf));
+
+ lua_newtable(L);
+ lua_pushinteger(L, addr->s_addr);
+ lua_setfield(L, -2, "value");
+ lua_pushstring(L, buf);
+ lua_setfield(L, -2, "string");
}
-lua_State *lua_initialize(const char *file)
+void *lua_toinaddr(lua_State *L, int idx)
{
- int status;
- lua_State *L = lua_newstate(lua_alloc, NULL);
+ struct in_addr *inaddr = XCALLOC(MTYPE_TMP, sizeof(struct in_addr));
- zlog_debug("Newstate: %p", L);
- luaL_openlibs(L);
- zlog_debug("Opened lib");
- status = luaL_loadfile(L, file);
- if (status) {
- zlog_debug("Failure to open %s %d", file, status);
- lua_close(L);
- return NULL;
- }
+ lua_getfield(L, idx, "value");
+ inaddr->s_addr = lua_tointeger(L, -1);
+ lua_pop(L, 1);
- lua_pcall(L, 0, LUA_MULTRET, 0);
- zlog_debug("Setting global function");
- lua_pushcfunction(L, lua_zlog_debug);
- lua_setglobal(L, "zlog_debug");
+ return inaddr;
+}
+
+
+void lua_pushin6addr(lua_State *L, const struct in6_addr *addr)
+{
+ char buf[INET6_ADDRSTRLEN];
+ inet_ntop(AF_INET6, addr, buf, sizeof(buf));
+
+ lua_newtable(L);
+ lua_pushlstring(L, (const char *)addr->s6_addr, 16);
+ lua_setfield(L, -2, "value");
+ lua_pushstring(L, buf);
+ lua_setfield(L, -2, "string");
+}
+
+void *lua_toin6addr(lua_State *L, int idx)
+{
+ struct in6_addr *in6addr = XCALLOC(MTYPE_TMP, sizeof(struct in6_addr));
+
+ lua_getfield(L, idx, "string");
+ inet_pton(AF_INET6, lua_tostring(L, -1), in6addr);
+ lua_pop(L, 1);
- return L;
+ return in6addr;
}
-void lua_setup_prefix_table(lua_State *L, const struct prefix *prefix)
+void lua_pushsockunion(lua_State *L, const union sockunion *su)
{
- char buffer[100];
+ char buf[SU_ADDRSTRLEN];
+ sockunion2str(su, buf, sizeof(buf));
lua_newtable(L);
- lua_pushstring(L, prefix2str(prefix, buffer, 100));
- lua_setfield(L, -2, "route");
- lua_pushinteger(L, prefix->family);
- lua_setfield(L, -2, "family");
- lua_setglobal(L, "prefix");
+ lua_pushlstring(L, (const char *)sockunion_get_addr(su),
+ sockunion_get_addrlen(su));
+ lua_setfield(L, -2, "value");
+ lua_pushstring(L, buf);
+ lua_setfield(L, -2, "string");
}
-enum lua_rm_status lua_run_rm_rule(lua_State *L, const char *rule)
+void *lua_tosockunion(lua_State *L, int idx)
{
- int status;
+ union sockunion *su = XCALLOC(MTYPE_TMP, sizeof(union sockunion));
+
+ lua_getfield(L, idx, "string");
+ str2sockunion(lua_tostring(L, -1), su);
+
+ return su;
+}
- lua_getglobal(L, rule);
- status = lua_pcall(L, 0, 1, 0);
- if (status) {
- zlog_debug("Executing Failure with function: %s: %d",
- rule, status);
- return LUA_RM_FAILURE;
+void lua_pushtimet(lua_State *L, const time_t *time)
+{
+ lua_pushinteger(L, *time);
+}
+
+void *lua_totimet(lua_State *L, int idx)
+{
+ time_t *t = XCALLOC(MTYPE_TMP, sizeof(time_t));
+
+ *t = lua_tointeger(L, idx);
+
+ return t;
+}
+
+void lua_pushintegerp(lua_State *L, const long long *num)
+{
+ lua_pushinteger(L, *num);
+}
+
+void *lua_tointegerp(lua_State *L, int idx)
+{
+ int isnum;
+ long long *num = XCALLOC(MTYPE_TMP, sizeof(long long));
+
+ *num = lua_tonumberx(L, idx, &isnum);
+ assert(isnum);
+
+ return num;
+}
+
+void *lua_tostringp(lua_State *L, int idx)
+{
+ char *string = XSTRDUP(MTYPE_TMP, lua_tostring(L, idx));
+
+ return string;
+}
+
+/*
+ * Logging.
+ *
+ * Lua-compatible wrappers for FRR logging functions.
+ */
+static const char *frrlua_log_thunk(lua_State *L)
+{
+ int nargs;
+
+ nargs = lua_gettop(L);
+ assert(nargs == 1);
+
+ return lua_tostring(L, 1);
+}
+
+static int frrlua_log_debug(lua_State *L)
+{
+ zlog_debug("%s", frrlua_log_thunk(L));
+ return 0;
+}
+
+static int frrlua_log_info(lua_State *L)
+{
+ zlog_info("%s", frrlua_log_thunk(L));
+ return 0;
+}
+
+static int frrlua_log_notice(lua_State *L)
+{
+ zlog_notice("%s", frrlua_log_thunk(L));
+ return 0;
+}
+
+static int frrlua_log_warn(lua_State *L)
+{
+ zlog_warn("%s", frrlua_log_thunk(L));
+ return 0;
+}
+
+static int frrlua_log_error(lua_State *L)
+{
+ zlog_err("%s", frrlua_log_thunk(L));
+ return 0;
+}
+
+static const luaL_Reg log_funcs[] = {
+ {"debug", frrlua_log_debug},
+ {"info", frrlua_log_info},
+ {"notice", frrlua_log_notice},
+ {"warn", frrlua_log_warn},
+ {"error", frrlua_log_error},
+ {},
+};
+
+void frrlua_export_logging(lua_State *L)
+{
+ lua_newtable(L);
+ luaL_setfuncs(L, log_funcs, 0);
+ lua_setglobal(L, "log");
+}
+
+/*
+ * Debugging.
+ */
+
+char *frrlua_stackdump(lua_State *L)
+{
+ int top = lua_gettop(L);
+
+ char tmpbuf[64];
+ struct buffer *buf = buffer_new(4098);
+
+ for (int i = 1; i <= top; i++) {
+ int t = lua_type(L, i);
+
+ switch (t) {
+ case LUA_TSTRING: /* strings */
+ snprintf(tmpbuf, sizeof(tmpbuf), "\"%s\"\n",
+ lua_tostring(L, i));
+ buffer_putstr(buf, tmpbuf);
+ break;
+ case LUA_TBOOLEAN: /* booleans */
+ snprintf(tmpbuf, sizeof(tmpbuf), "%s\n",
+ lua_toboolean(L, i) ? "true" : "false");
+ buffer_putstr(buf, tmpbuf);
+ break;
+ case LUA_TNUMBER: /* numbers */
+ snprintf(tmpbuf, sizeof(tmpbuf), "%g\n",
+ lua_tonumber(L, i));
+ buffer_putstr(buf, tmpbuf);
+ break;
+ default: /* other values */
+ snprintf(tmpbuf, sizeof(tmpbuf), "%s\n",
+ lua_typename(L, t));
+ buffer_putstr(buf, tmpbuf);
+ break;
+ }
}
- status = lua_tonumber(L, -1);
- return status;
+ char *result = XSTRDUP(MTYPE_TMP, buffer_getstr(buf));
+
+ buffer_free(buf);
+
+ return result;
}
-#endif
+
+#endif /* HAVE_SCRIPTING */
diff --git a/lib/frrlua.h b/lib/frrlua.h
index 40c7a67b89..6fb30938b0 100644
--- a/lib/frrlua.h
+++ b/lib/frrlua.h
@@ -1,88 +1,184 @@
/*
- * This file defines the lua interface into
- * FRRouting.
+ * Copyright (C) 2016-2019 Cumulus Networks, Inc.
+ * Donald Sharp, Quentin Young
*
- * Copyright (C) 2016 Cumulus Networks, Inc.
- * Donald Sharp
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
*
- * This file is part of FRRouting (FRR).
- *
- * FRR is free software; you can redistribute it and/or modify it under the
- * terms of the GNU General Public License as published by the Free Software
- * Foundation; either version 2, or (at your option) any later version.
- *
- * FRR is distributed in the hope that it will be useful, but WITHOUT ANY
- * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
- * details.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
*
* You should have received a copy of the GNU General Public License along
- * with FRR; see the file COPYING. If not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#ifndef __LUA_H__
-#define __LUA_H__
+#ifndef __FRRLUA_H__
+#define __FRRLUA_H__
+
+#include <zebra.h>
-#if defined(HAVE_LUA)
+#ifdef HAVE_SCRIPTING
-#include "lua.h"
-#include "lualib.h"
-#include "lauxlib.h"
+#include <lua.h>
+#include <lualib.h>
+#include <lauxlib.h>
+
+#include "prefix.h"
+#include "frrscript.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
- * These functions are helper functions that
- * try to glom some of the lua_XXX functionality
- * into what we actually need, instead of having
- * to make multiple calls to set up what
- * we want
+ * gcc-10 is complaining about the wrapper function
+ * not being compatible with lua_pushstring returning
+ * a char *. Let's wrapper it here to make our life
+ * easier
+ */
+static inline void lua_pushstring_wrapper(lua_State *L, const char *str)
+{
+ (void)lua_pushstring(L, str);
+}
+
+/*
+ * Converts a prefix to a Lua value and pushes it on the stack.
+ */
+void lua_pushprefix(lua_State *L, const struct prefix *prefix);
+
+/*
+ * Converts the Lua value at idx to a prefix.
+ *
+ * Returns:
+ * struct prefix allocated with MTYPE_TMP
+ */
+void *lua_toprefix(lua_State *L, int idx);
+
+/*
+ * Converts an interface to a Lua value and pushes it on the stack.
+ */
+void lua_pushinterface(lua_State *L, const struct interface *ifp);
+
+/*
+ * Converts the Lua value at idx to an interface.
+ *
+ * Returns:
+ * struct interface allocated with MTYPE_TMP. This interface is not hooked
+ * to anything, nor is it inserted in the global interface tree.
*/
-enum lua_rm_status {
- /*
- * Script function run failure. This will translate into a
- * deny
- */
- LUA_RM_FAILURE = 0,
- /*
- * No Match was found for the route map function
- */
- LUA_RM_NOMATCH,
- /*
- * Match was found but no changes were made to the
- * incoming data.
- */
- LUA_RM_MATCH,
- /*
- * Match was found and data was modified, so
- * figure out what changed
- */
- LUA_RM_MATCH_AND_CHANGE,
-};
+void *lua_tointerface(lua_State *L, int idx);
/*
- * Open up the lua.scr file and parse
- * initial global values, if any.
+ * Converts an in_addr to a Lua value and pushes it on the stack.
*/
-lua_State *lua_initialize(const char *file);
+void lua_pushinaddr(lua_State *L, const struct in_addr *addr);
-void lua_setup_prefix_table(lua_State *L, const struct prefix *prefix);
+/*
+ * Converts the Lua value at idx to an in_addr.
+ *
+ * Returns:
+ * struct in_addr allocated with MTYPE_TMP.
+ */
+void *lua_toinaddr(lua_State *L, int idx);
-enum lua_rm_status lua_run_rm_rule(lua_State *L, const char *rule);
+/*
+ * Converts an in6_addr to a Lua value and pushes it on the stack.
+ */
+void lua_pushin6addr(lua_State *L, const struct in6_addr *addr);
/*
- * Get particular string/integer information
- * from a table. It is *assumed* that
- * the table has already been selected
+ * Converts the Lua value at idx to an in6_addr.
+ *
+ * Returns:
+ * struct in6_addr allocated with MTYPE_TMP.
*/
-const char *get_string(lua_State *L, const char *key);
-int get_integer(lua_State *L, const char *key);
+void *lua_toin6addr(lua_State *L, int idx);
+
+/*
+ * Converts a time_t to a Lua value and pushes it on the stack.
+ */
+void lua_pushtimet(lua_State *L, const time_t *time);
+
+/*
+ * Converts the Lua value at idx to a time_t.
+ *
+ * Returns:
+ * time_t allocated with MTYPE_TMP.
+ */
+void *lua_totimet(lua_State *L, int idx);
+
+/*
+ * Converts a sockunion to a Lua value and pushes it on the stack.
+ */
+void lua_pushsockunion(lua_State *L, const union sockunion *su);
+
+/*
+ * Converts the Lua value at idx to a sockunion.
+ *
+ * Returns:
+ * sockunion allocated with MTYPE_TMP.
+ */
+void *lua_tosockunion(lua_State *L, int idx);
+
+/*
+ * Converts a long long to a Lua value and pushes it on the stack.
+ */
+void lua_pushintegerp(lua_State *L, const long long *num);
+
+/*
+ * Converts the Lua value at idx to a long long.
+ *
+ * Returns:
+ * long long allocated with MTYPE_TMP.
+ */
+void *lua_tointegerp(lua_State *L, int idx);
+
+/*
+ * Converts the Lua value at idx to a string.
+ *
+ * Returns a copy of the string at idx, allocated with MTYPE_TMP. The
+ * caller is responsible for freeing it.
+ */
+void *lua_tostringp(lua_State *L, int idx);
+
+/*
+ * Retrieve an integer from table on the top of the stack.
+ *
+ * key
+ * Key of integer value in table
+ */
+int frrlua_table_get_integer(lua_State *L, const char *key);
+
+/*
+ * Exports a new table containing bindings to FRR zlog functions into the
+ * global namespace.
+ *
+ * From Lua, these functions may be accessed as:
+ *
+ * - log.debug()
+ * - log.info() and log.notice()
+ * - log.warn()
+ * - log.error()
+ *
+ * They take a single string argument.
+ */
+void frrlua_export_logging(lua_State *L);
+
+/*
+ * Dump Lua stack to a string.
+ *
+ * Return value must be freed with XFREE(MTYPE_TMP, ...);
+ */
+char *frrlua_stackdump(lua_State *L);
#ifdef __cplusplus
}
#endif
-#endif
-#endif
+#endif /* HAVE_SCRIPTING */
+
+#endif /* __FRRLUA_H__ */
diff --git a/lib/frrscript.c b/lib/frrscript.c
new file mode 100644
index 0000000000..10d400886d
--- /dev/null
+++ b/lib/frrscript.c
@@ -0,0 +1,272 @@
+/* Scripting foo
+ * Copyright (C) 2020 NVIDIA Corporation
+ * Quentin Young
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <zebra.h>
+
+#ifdef HAVE_SCRIPTING
+
+#include <stdarg.h>
+#include <lua.h>
+
+#include "frrscript.h"
+#include "frrlua.h"
+#include "memory.h"
+#include "hash.h"
+#include "log.h"
+
+
+DEFINE_MTYPE_STATIC(LIB, SCRIPT, "Scripting");
+
+/* Codecs */
+
+struct frrscript_codec frrscript_codecs_lib[] = {
+ {.typename = "integer",
+ .encoder = (encoder_func)lua_pushintegerp,
+ .decoder = lua_tointegerp},
+ {.typename = "string",
+ .encoder = (encoder_func)lua_pushstring_wrapper,
+ .decoder = lua_tostringp},
+ {.typename = "prefix",
+ .encoder = (encoder_func)lua_pushprefix,
+ .decoder = lua_toprefix},
+ {.typename = "interface",
+ .encoder = (encoder_func)lua_pushinterface,
+ .decoder = lua_tointerface},
+ {.typename = "in_addr",
+ .encoder = (encoder_func)lua_pushinaddr,
+ .decoder = lua_toinaddr},
+ {.typename = "in6_addr",
+ .encoder = (encoder_func)lua_pushin6addr,
+ .decoder = lua_toin6addr},
+ {.typename = "sockunion",
+ .encoder = (encoder_func)lua_pushsockunion,
+ .decoder = lua_tosockunion},
+ {.typename = "time_t",
+ .encoder = (encoder_func)lua_pushtimet,
+ .decoder = lua_totimet},
+ {}};
+
+/* Type codecs */
+
+struct hash *codec_hash;
+char scriptdir[MAXPATHLEN];
+
+static unsigned int codec_hash_key(const void *data)
+{
+ const struct frrscript_codec *c = data;
+
+ return string_hash_make(c->typename);
+}
+
+static bool codec_hash_cmp(const void *d1, const void *d2)
+{
+ const struct frrscript_codec *e1 = d1;
+ const struct frrscript_codec *e2 = d2;
+
+ return strmatch(e1->typename, e2->typename);
+}
+
+static void *codec_alloc(void *arg)
+{
+ struct frrscript_codec *tmp = arg;
+
+ struct frrscript_codec *e =
+ XCALLOC(MTYPE_SCRIPT, sizeof(struct frrscript_codec));
+ e->typename = XSTRDUP(MTYPE_SCRIPT, tmp->typename);
+ e->encoder = tmp->encoder;
+ e->decoder = tmp->decoder;
+
+ return e;
+}
+
+#if 0
+static void codec_free(struct codec *c)
+{
+ XFREE(MTYPE_TMP, c->typename);
+ XFREE(MTYPE_TMP, c);
+}
+#endif
+
+/* Generic script APIs */
+
+int frrscript_call(struct frrscript *fs, struct frrscript_env *env)
+{
+ struct frrscript_codec c = {};
+ const void *arg;
+ const char *bindname;
+
+ /* Encode script arguments */
+ for (int i = 0; env && env[i].val != NULL; i++) {
+ bindname = env[i].name;
+ c.typename = env[i].typename;
+ arg = env[i].val;
+
+ struct frrscript_codec *codec = hash_lookup(codec_hash, &c);
+ assert(codec && "No encoder for type");
+ codec->encoder(fs->L, arg);
+
+ lua_setglobal(fs->L, bindname);
+ }
+
+ int ret = lua_pcall(fs->L, 0, 0, 0);
+
+ switch (ret) {
+ case LUA_OK:
+ break;
+ case LUA_ERRRUN:
+ zlog_err("Script '%s' runtime error: %s", fs->name,
+ lua_tostring(fs->L, -1));
+ break;
+ case LUA_ERRMEM:
+ zlog_err("Script '%s' memory error: %s", fs->name,
+ lua_tostring(fs->L, -1));
+ break;
+ case LUA_ERRERR:
+ zlog_err("Script '%s' error handler error: %s", fs->name,
+ lua_tostring(fs->L, -1));
+ break;
+ case LUA_ERRGCMM:
+ zlog_err("Script '%s' garbage collector error: %s", fs->name,
+ lua_tostring(fs->L, -1));
+ break;
+ default:
+ zlog_err("Script '%s' unknown error: %s", fs->name,
+ lua_tostring(fs->L, -1));
+ break;
+ }
+
+ if (ret != LUA_OK) {
+ lua_pop(fs->L, 1);
+ goto done;
+ }
+
+done:
+ /* LUA_OK is 0, so we can just return lua_pcall's result directly */
+ return ret;
+}
+
+void *frrscript_get_result(struct frrscript *fs,
+ const struct frrscript_env *result)
+{
+ void *r;
+ struct frrscript_codec c = {.typename = result->typename};
+
+ struct frrscript_codec *codec = hash_lookup(codec_hash, &c);
+ assert(codec && "No encoder for type");
+
+ if (!codec->decoder) {
+ zlog_err("No script decoder for type '%s'", result->typename);
+ return NULL;
+ }
+
+ lua_getglobal(fs->L, result->name);
+ r = codec->decoder(fs->L, -1);
+ lua_pop(fs->L, 1);
+
+ return r;
+}
+
+void frrscript_register_type_codec(struct frrscript_codec *codec)
+{
+ struct frrscript_codec c = *codec;
+
+ if (hash_lookup(codec_hash, &c)) {
+ zlog_backtrace(LOG_ERR);
+ assert(!"Type codec double-registered.");
+ }
+
+ assert(hash_get(codec_hash, &c, codec_alloc));
+}
+
+void frrscript_register_type_codecs(struct frrscript_codec *codecs)
+{
+ for (int i = 0; codecs[i].typename != NULL; i++)
+ frrscript_register_type_codec(&codecs[i]);
+}
+
+struct frrscript *frrscript_load(const char *name,
+ int (*load_cb)(struct frrscript *))
+{
+ struct frrscript *fs = XCALLOC(MTYPE_SCRIPT, sizeof(struct frrscript));
+
+ fs->name = XSTRDUP(MTYPE_SCRIPT, name);
+ fs->L = luaL_newstate();
+ frrlua_export_logging(fs->L);
+
+ char fname[MAXPATHLEN * 2];
+ snprintf(fname, sizeof(fname), "%s/%s.lua", scriptdir, fs->name);
+
+ int ret = luaL_loadfile(fs->L, fname);
+
+ switch (ret) {
+ case LUA_OK:
+ break;
+ case LUA_ERRSYNTAX:
+ zlog_err("Failed loading script '%s': syntax error: %s", fname,
+ lua_tostring(fs->L, -1));
+ break;
+ case LUA_ERRMEM:
+ zlog_err("Failed loading script '%s': out-of-memory error: %s",
+ fname, lua_tostring(fs->L, -1));
+ break;
+ case LUA_ERRGCMM:
+ zlog_err(
+ "Failed loading script '%s': garbage collector error: %s",
+ fname, lua_tostring(fs->L, -1));
+ break;
+ case LUA_ERRFILE:
+ zlog_err("Failed loading script '%s': file read error: %s",
+ fname, lua_tostring(fs->L, -1));
+ break;
+ default:
+ zlog_err("Failed loading script '%s': unknown error: %s", fname,
+ lua_tostring(fs->L, -1));
+ break;
+ }
+
+ if (ret != LUA_OK)
+ goto fail;
+
+ if (load_cb && (*load_cb)(fs) != 0)
+ goto fail;
+
+ return fs;
+fail:
+ frrscript_unload(fs);
+ return NULL;
+}
+
+void frrscript_unload(struct frrscript *fs)
+{
+ lua_close(fs->L);
+ XFREE(MTYPE_SCRIPT, fs->name);
+ XFREE(MTYPE_SCRIPT, fs);
+}
+
+void frrscript_init(const char *sd)
+{
+ codec_hash = hash_create(codec_hash_key, codec_hash_cmp,
+ "Lua type encoders");
+
+ strlcpy(scriptdir, sd, sizeof(scriptdir));
+
+ /* Register core library types */
+ frrscript_register_type_codecs(frrscript_codecs_lib);
+}
+
+#endif /* HAVE_SCRIPTING */
diff --git a/lib/frrscript.h b/lib/frrscript.h
new file mode 100644
index 0000000000..f4057f531b
--- /dev/null
+++ b/lib/frrscript.h
@@ -0,0 +1,138 @@
+/* Scripting foo
+ * Copyright (C) 2020 NVIDIA Corporation
+ * Quentin Young
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __FRRSCRIPT_H__
+#define __FRRSCRIPT_H__
+
+#include <zebra.h>
+
+#ifdef HAVE_SCRIPTING
+
+#include <lua.h>
+#include "frrlua.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef void (*encoder_func)(lua_State *, const void *);
+typedef void *(*decoder_func)(lua_State *, int);
+
+struct frrscript_codec {
+ const char *typename;
+ encoder_func encoder;
+ decoder_func decoder;
+};
+
+struct frrscript {
+ /* Script name */
+ char *name;
+
+ /* Lua state */
+ struct lua_State *L;
+};
+
+struct frrscript_env {
+ /* Value type */
+ const char *typename;
+
+ /* Binding name */
+ const char *name;
+
+ /* Value */
+ const void *val;
+};
+
+/*
+ * Create new FRR script.
+ */
+struct frrscript *frrscript_load(const char *name,
+ int (*load_cb)(struct frrscript *));
+
+/*
+ * Destroy FRR script.
+ */
+void frrscript_unload(struct frrscript *fs);
+
+/*
+ * Register a Lua codec for a type.
+ *
+ * tname
+ * Name of type; e.g., "peer", "ospf_interface", etc. Chosen at will.
+ *
+ * codec(s)
+ * Function pointer to codec struct. Encoder function should push a Lua
+ * table representing the passed argument - which will have the C type
+ * associated with the chosen 'tname' to the provided stack. The decoder
+ * function should pop a value from the top of the stack and return a heap
+ * chunk containing that value. Allocations should be made with MTYPE_TMP.
+ *
+ * If using the plural function variant, pass a NULL-terminated array.
+ *
+ */
+void frrscript_register_type_codec(struct frrscript_codec *codec);
+void frrscript_register_type_codecs(struct frrscript_codec *codecs);
+
+/*
+ * Initialize scripting subsystem. Call this before anything else.
+ *
+ * scriptdir
+ * Directory in which to look for scripts
+ */
+void frrscript_init(const char *scriptdir);
+
+
+/*
+ * Call script.
+ *
+ * fs
+ * The script to call; this is obtained from frrscript_load().
+ *
+ * env
+ * The script's environment. Specify this as an array of frrscript_env.
+ *
+ * Returns:
+ * 0 if the script ran successfully, nonzero otherwise.
+ */
+int frrscript_call(struct frrscript *fs, struct frrscript_env *env);
+
+
+/*
+ * Get result from finished script.
+ *
+ * fs
+ * The script. This script must have been run already.
+ *
+ * result
+ * The result to extract from the script.
+ * This reuses the frrscript_env type, but only the typename and name fields
+ * need to be set. The value is returned directly.
+ *
+ * Returns:
+ * The script result of the specified name and type, or NULL.
+ */
+void *frrscript_get_result(struct frrscript *fs,
+ const struct frrscript_env *result);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* HAVE_SCRIPTING */
+
+#endif /* __FRRSCRIPT_H__ */
diff --git a/lib/hash.c b/lib/hash.c
index ed429b77d0..ec616ee724 100644
--- a/lib/hash.c
+++ b/lib/hash.c
@@ -32,7 +32,7 @@
#include "libfrr_trace.h"
DEFINE_MTYPE_STATIC(LIB, HASH, "Hash")
-DEFINE_MTYPE_STATIC(LIB, HASH_BACKET, "Hash Bucket")
+DEFINE_MTYPE_STATIC(LIB, HASH_BUCKET, "Hash Bucket")
DEFINE_MTYPE_STATIC(LIB, HASH_INDEX, "Hash Index")
static pthread_mutex_t _hashes_mtx = PTHREAD_MUTEX_INITIALIZER;
@@ -168,7 +168,7 @@ void *hash_get(struct hash *hash, void *data, void *(*alloc_func)(void *))
index = key & (hash->size - 1);
}
- bucket = XCALLOC(MTYPE_HASH_BACKET, sizeof(struct hash_bucket));
+ bucket = XCALLOC(MTYPE_HASH_BUCKET, sizeof(struct hash_bucket));
bucket->data = newdata;
bucket->key = key;
bucket->next = hash->index[index];
@@ -239,7 +239,7 @@ void *hash_release(struct hash *hash, void *data)
hash_update_ssq(hash, oldlen, newlen);
ret = bucket->data;
- XFREE(MTYPE_HASH_BACKET, bucket);
+ XFREE(MTYPE_HASH_BUCKET, bucket);
hash->count--;
break;
}
@@ -302,7 +302,7 @@ void hash_clean(struct hash *hash, void (*free_func)(void *))
if (free_func)
(*free_func)(hb->data);
- XFREE(MTYPE_HASH_BACKET, hb);
+ XFREE(MTYPE_HASH_BUCKET, hb);
hash->count--;
}
hash->index[i] = NULL;
diff --git a/lib/hash.h b/lib/hash.h
index 23e93b6d7d..47d951a34b 100644
--- a/lib/hash.h
+++ b/lib/hash.h
@@ -76,7 +76,7 @@ struct hash {
/* Data compare function. */
bool (*hash_cmp)(const void *, const void *);
- /* Backet alloc. */
+ /* Bucket alloc. */
unsigned long count;
struct hashstats stats;
diff --git a/lib/if.c b/lib/if.c
index c707c4c6d9..7ec53d356d 100644
--- a/lib/if.c
+++ b/lib/if.c
@@ -351,6 +351,40 @@ struct interface *if_lookup_by_index(ifindex_t ifindex, vrf_id_t vrf_id)
return NULL;
}
+/* Interface existence check by index. */
+struct interface *if_vrf_lookup_by_index_next(ifindex_t ifindex,
+ vrf_id_t vrf_id)
+{
+ struct vrf *vrf = vrf_lookup_by_id(vrf_id);
+ struct interface *tmp_ifp;
+ bool found = false;
+
+ if (!vrf)
+ return NULL;
+
+ if (ifindex == 0) {
+ tmp_ifp = RB_MIN(if_index_head, &vrf->ifaces_by_index);
+ /* skip the vrf interface */
+ if (tmp_ifp && if_is_vrf(tmp_ifp))
+ ifindex = tmp_ifp->ifindex;
+ else
+ return tmp_ifp;
+ }
+
+ RB_FOREACH (tmp_ifp, if_index_head, &vrf->ifaces_by_index) {
+ if (found) {
+ /* skip the vrf interface */
+ if (tmp_ifp && if_is_vrf(tmp_ifp))
+ continue;
+ else
+ return tmp_ifp;
+ }
+ if (tmp_ifp->ifindex == ifindex)
+ found = true;
+ }
+ return NULL;
+}
+
const char *ifindex2ifname(ifindex_t ifindex, vrf_id_t vrf_id)
{
struct interface *ifp;
@@ -802,70 +836,6 @@ void if_dump_all(void)
if_dump(ifp);
}
-#if 0
-/* For debug purpose. */
-DEFUN (show_address,
- show_address_cmd,
- "show address [vrf NAME]",
- SHOW_STR
- "address\n"
- VRF_CMD_HELP_STR)
-{
- int idx_vrf = 3;
- struct listnode *node;
- struct interface *ifp;
- struct connected *ifc;
- struct prefix *p;
- vrf_id_t vrf_id = VRF_DEFAULT;
-
- if (argc > 2)
- VRF_GET_ID (vrf_id, argv[idx_vrf]->arg);
-
- FOR_ALL_INTERFACES (vrf, ifp) {
- for (ALL_LIST_ELEMENTS_RO (ifp->connected, node, ifc)) {
- p = ifc->address;
-
- if (p->family == AF_INET)
- vty_out (vty, "%pFX\n", p);
- }
- }
- return CMD_SUCCESS;
-}
-
-DEFUN (show_address_vrf_all,
- show_address_vrf_all_cmd,
- "show address vrf all",
- SHOW_STR
- "address\n"
- VRF_ALL_CMD_HELP_STR)
-{
- struct vrf *vrf;
- struct listnode *node;
- struct interface *ifp;
- struct connected *ifc;
- struct prefix *p;
-
- RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
- {
- if (RB_EMPTY (if_name_head, &vrf->ifaces_by_name))
- continue;
-
- vty_out (vty, "\nVRF %s(%u)\n\n",
- VRF_LOGNAME(vrf), vrf->vrf_id);
-
- FOR_ALL_INTERFACES (vrf, ifp) {
- for (ALL_LIST_ELEMENTS_RO (ifp->connected, node, ifc)) {
- p = ifc->address;
-
- if (p->family == AF_INET)
- vty_out (vty, "%pFX\n", p);
- }
- }
- }
- return CMD_SUCCESS;
-}
-#endif
-
/* Allocate connected structure. */
struct connected *connected_new(void)
{
@@ -1083,84 +1053,6 @@ struct connected *connected_get_linklocal(struct interface *ifp)
return c;
}
-#if 0 /* this route_table of struct connected's is unused \
- * however, it would be good to use a route_table rather than \
- * a list.. \
- */
-/* Interface looking up by interface's address. */
-/* Interface's IPv4 address reverse lookup table. */
-struct route_table *ifaddr_ipv4_table;
-/* struct route_table *ifaddr_ipv6_table; */
-
-static void
-ifaddr_ipv4_add (struct in_addr *ifaddr, struct interface *ifp)
-{
- struct route_node *rn;
- struct prefix_ipv4 p;
-
- p.family = AF_INET;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
- p.prefix = *ifaddr;
-
- rn = route_node_get (ifaddr_ipv4_table, (struct prefix *) &p);
- if (rn)
- {
- route_unlock_node (rn);
- zlog_info("ifaddr_ipv4_add(): address %pI4 is already added",
- ifaddr);
- return;
- }
- rn->info = ifp;
-}
-
-static void
-ifaddr_ipv4_delete (struct in_addr *ifaddr, struct interface *ifp)
-{
- struct route_node *rn;
- struct prefix_ipv4 p;
-
- p.family = AF_INET;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
- p.prefix = *ifaddr;
-
- rn = route_node_lookup (ifaddr_ipv4_table, (struct prefix *) &p);
- if (! rn)
- {
- zlog_info("%s: can't find address %pI4", __func__, ifaddr);
- return;
- }
- rn->info = NULL;
- route_unlock_node (rn);
- route_unlock_node (rn);
-}
-
-/* Lookup interface by interface's IP address or interface index. */
-static struct interface *
-ifaddr_ipv4_lookup (struct in_addr *addr, ifindex_t ifindex)
-{
- struct prefix_ipv4 p;
- struct route_node *rn;
- struct interface *ifp;
-
- if (addr)
- {
- p.family = AF_INET;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
- p.prefix = *addr;
-
- rn = route_node_lookup (ifaddr_ipv4_table, (struct prefix *) &p);
- if (! rn)
- return NULL;
-
- ifp = rn->info;
- route_unlock_node (rn);
- return ifp;
- }
- else
- return if_lookup_by_index(ifindex, VRF_DEFAULT);
-}
-#endif /* ifaddr_ipv4_table */
-
void if_terminate(struct vrf *vrf)
{
struct interface *ifp;
diff --git a/lib/if.h b/lib/if.h
index a2a40d0957..5bf52936ae 100644
--- a/lib/if.h
+++ b/lib/if.h
@@ -513,6 +513,8 @@ extern struct interface *if_create_name(const char *name, vrf_id_t vrf_id);
/* Create new interface, adds to index list only */
extern struct interface *if_create_ifindex(ifindex_t ifindex, vrf_id_t vrf_id);
extern struct interface *if_lookup_by_index(ifindex_t, vrf_id_t vrf_id);
+extern struct interface *if_vrf_lookup_by_index_next(ifindex_t ifindex,
+ vrf_id_t vrf_id);
extern struct interface *if_lookup_by_index_all_vrf(ifindex_t);
extern struct interface *if_lookup_exact_address(const void *matchaddr,
int family, vrf_id_t vrf_id);
diff --git a/lib/lib_vty.c b/lib/lib_vty.c
index cd8b5c9809..128261a39c 100644
--- a/lib/lib_vty.c
+++ b/lib/lib_vty.c
@@ -43,10 +43,14 @@
#include "vty.h"
#include "command.h"
-#ifdef HAVE_MALLINFO
+#if defined(HAVE_MALLINFO2) || defined(HAVE_MALLINFO)
static int show_memory_mallinfo(struct vty *vty)
{
+#if defined(HAVE_MALLINFO2)
+ struct mallinfo2 minfo = mallinfo2();
+#elif defined(HAVE_MALLINFO)
struct mallinfo minfo = mallinfo();
+#endif
char buf[MTYPE_MEMSTR_LEN];
vty_out(vty, "System allocator statistics:\n");
diff --git a/lib/libfrr.c b/lib/libfrr.c
index 8e7777a1a9..51b97369c9 100644
--- a/lib/libfrr.c
+++ b/lib/libfrr.c
@@ -43,6 +43,7 @@
#include "frrcu.h"
#include "frr_pthread.h"
#include "defaults.h"
+#include "frrscript.h"
DEFINE_HOOK(frr_late_init, (struct thread_master * tm), (tm))
DEFINE_HOOK(frr_very_late_init, (struct thread_master * tm), (tm))
@@ -55,6 +56,7 @@ char frr_vtydir[256];
const char frr_dbdir[] = DAEMON_DB_DIR;
#endif
const char frr_moduledir[] = MODULE_PATH;
+const char frr_scriptdir[] = SCRIPT_PATH;
char frr_protoname[256] = "NONE";
char frr_protonameinst[256] = "NONE";
@@ -69,6 +71,7 @@ static char vtypath_default[512];
bool debug_memstats_at_exit = false;
static bool nodetach_term, nodetach_daemon;
+static uint64_t startup_fds;
static char comb_optstr[256];
static struct option comb_lo[64];
@@ -100,6 +103,7 @@ static void opt_extend(const struct optspec *os)
#define OPTION_DB_FILE 1006
#define OPTION_LOGGING 1007
#define OPTION_LIMIT_FDS 1008
+#define OPTION_SCRIPTDIR 1009
static const struct option lo_always[] = {
{"help", no_argument, NULL, 'h'},
@@ -110,6 +114,7 @@ static const struct option lo_always[] = {
{"pathspace", required_argument, NULL, 'N'},
{"vty_socket", required_argument, NULL, OPTION_VTYSOCK},
{"moduledir", required_argument, NULL, OPTION_MODULEDIR},
+ {"scriptdir", required_argument, NULL, OPTION_SCRIPTDIR},
{"log", required_argument, NULL, OPTION_LOG},
{"log-level", required_argument, NULL, OPTION_LOGLEVEL},
{"tcli", no_argument, NULL, OPTION_TCLI},
@@ -126,6 +131,7 @@ static const struct optspec os_always = {
" -N, --pathspace Insert prefix into config & socket paths\n"
" --vty_socket Override vty socket path\n"
" --moduledir Override modules directory\n"
+ " --scriptdir Override scripts directory\n"
" --log Set Logging to stdout, syslog, or file:<name>\n"
" --log-level Set Logging Level to use, debug, info, warn, etc\n"
" --tcli Use transaction-based CLI\n"
@@ -336,6 +342,28 @@ void frr_preinit(struct frr_daemon_info *daemon, int argc, char **argv)
strlcpy(frr_protonameinst, di->logname, sizeof(frr_protonameinst));
di->cli_mode = FRR_CLI_CLASSIC;
+
+ /* we may be starting with extra FDs open for whatever purpose,
+ * e.g. logging, some module, etc. Recording them here allows later
+ * checking whether an fd is valid for such extension purposes,
+ * without this we could end up e.g. logging to a BGP session fd.
+ */
+ startup_fds = 0;
+ for (int i = 0; i < 64; i++) {
+ struct stat st;
+
+ if (fstat(i, &st))
+ continue;
+ if (S_ISDIR(st.st_mode) || S_ISBLK(st.st_mode))
+ continue;
+
+ startup_fds |= UINT64_C(0x1) << (uint64_t)i;
+ }
+}
+
+bool frr_is_startup_fd(int fd)
+{
+ return !!(startup_fds & (UINT64_C(0x1) << (uint64_t)fd));
}
void frr_opt_add(const char *optstr, const struct option *longopts,
@@ -533,6 +561,14 @@ static int frr_opt(int opt)
}
di->module_path = optarg;
break;
+ case OPTION_SCRIPTDIR:
+ if (di->script_path) {
+ fprintf(stderr, "--scriptdir option specified more than once!\n");
+ errors++;
+ break;
+ }
+ di->script_path = optarg;
+ break;
case OPTION_TCLI:
di->cli_mode = FRR_CLI_TRANSACTIONAL;
break;
@@ -717,6 +753,9 @@ struct thread_master *frr_init(void)
lib_cmd_init();
frr_pthread_init();
+#ifdef HAVE_SCRIPTING
+ frrscript_init(di->script_path ? di->script_path : frr_scriptdir);
+#endif
log_ref_init();
log_ref_vty_init();
diff --git a/lib/libfrr.h b/lib/libfrr.h
index 2e4dcbe093..825f502bdf 100644
--- a/lib/libfrr.h
+++ b/lib/libfrr.h
@@ -81,6 +81,7 @@ struct frr_daemon_info {
#endif
const char *vty_path;
const char *module_path;
+ const char *script_path;
const char *pathspace;
bool zpathspace;
@@ -137,7 +138,8 @@ extern __attribute__((__noreturn__)) void frr_help_exit(int status);
extern struct thread_master *frr_init(void);
extern const char *frr_get_progname(void);
extern enum frr_cli_mode frr_get_cli_mode(void);
-uint32_t frr_get_fd_limit(void);
+extern uint32_t frr_get_fd_limit(void);
+extern bool frr_is_startup_fd(int fd);
DECLARE_HOOK(frr_late_init, (struct thread_master * tm), (tm))
DECLARE_HOOK(frr_very_late_init, (struct thread_master * tm), (tm))
@@ -162,6 +164,7 @@ extern char frr_zclientpath[256];
extern const char frr_sysconfdir[];
extern char frr_vtydir[256];
extern const char frr_moduledir[];
+extern const char frr_scriptdir[];
extern char frr_protoname[];
extern char frr_protonameinst[];
diff --git a/lib/link_state.c b/lib/link_state.c
index f8fdda64f0..ecf0d0698d 100644
--- a/lib/link_state.c
+++ b/lib/link_state.c
@@ -1234,7 +1234,7 @@ void ls_dump_ted(struct ls_ted *ted)
/* Loop TED, start printing Node, then Attributes and finally Prefix */
frr_each(vertices, &ted->vertices, vertex) {
ls_vertex2msg(&msg, vertex);
- zlog_debug("\tTed node (%s %pI4 %s)",
+ zlog_debug(" Ted node (%s %pI4 %s)",
vertex->node->name[0] ? vertex->node->name
: "no name node",
&vertex->node->router_id,
@@ -1246,7 +1246,7 @@ void ls_dump_ted(struct ls_ted *ted)
for (ALL_LIST_ELEMENTS_RO(vertex->incoming_edges, lst_node,
vertex_edge)) {
zlog_debug(
- "\t\tinc edge key:%lldn attr key:%pI4 loc:(%pI4) rmt:(%pI4)",
+ " inc edge key:%"PRIu64"n attr key:%pI4 loc:(%pI4) rmt:(%pI4)",
vertex_edge->key,
&vertex_edge->attributes->adv.id.ip.addr,
&vertex_edge->attributes->standard.local,
@@ -1255,7 +1255,7 @@ void ls_dump_ted(struct ls_ted *ted)
for (ALL_LIST_ELEMENTS_RO(vertex->outgoing_edges, lst_node,
vertex_edge)) {
zlog_debug(
- "\t\tout edge key:%lld attr key:%pI4 loc:(%pI4) rmt:(%pI4)",
+ " out edge key:%"PRIu64" attr key:%pI4 loc:(%pI4) rmt:(%pI4)",
vertex_edge->key,
&vertex_edge->attributes->adv.id.ip.addr,
&vertex_edge->attributes->standard.local,
@@ -1264,7 +1264,8 @@ void ls_dump_ted(struct ls_ted *ted)
}
frr_each(edges, &ted->edges, edge) {
ls_edge2msg(&msg, edge);
- zlog_debug("\tTed edge key:%lld src:%s dst:%s", edge->key,
+ zlog_debug(" Ted edge key:%"PRIu64" src:%s dst:%s",
+ edge->key,
edge->source ? edge->source->node->name
: "no_source",
edge->destination ? edge->destination->node->name
@@ -1273,10 +1274,8 @@ void ls_dump_ted(struct ls_ted *ted)
frr_each(subnets, &ted->subnets, subnet) {
ls_subnet2msg(&msg, subnet);
zlog_debug(
- "\tTed subnet key:%s vertex:%pI4 pfx:%pFX",
- subnet->key.family == AF_INET
- ? inet_ntoa(subnet->key.u.prefix4)
- : inet6_ntoa(subnet->key.u.prefix6),
+ " Ted subnet key:%pFX vertex:%pI4 pfx:%pFX",
+ &subnet->key,
&subnet->vertex->node->adv.id.ip.addr,
&subnet->ls_pref->pref);
}
diff --git a/lib/log.c b/lib/log.c
index 03ed23a04b..b86d3022b4 100644
--- a/lib/log.c
+++ b/lib/log.c
@@ -161,8 +161,9 @@ void zlog_signal(int signo, const char *action, void *siginfo_v,
if (!tc)
bprintfrr(&fb, "no thread information available\n");
else
- bprintfrr(&fb, "in thread %s scheduled from %s:%d\n",
- tc->funcname, tc->schedfrom, tc->schedfrom_line);
+ bprintfrr(&fb, "in thread %s scheduled from %s:%d %s()\n",
+ tc->xref->funcname, tc->xref->xref.file,
+ tc->xref->xref.line, tc->xref->xref.func);
zlog_sigsafe(fb.buf, fb.pos - fb.buf);
}
@@ -179,6 +180,9 @@ void zlog_backtrace_sigsafe(int priority, void *program_counter)
unw_word_t ip, off, sp;
Dl_info dlinfo;
+ memset(&uc, 0, sizeof(uc));
+ memset(&cursor, 0, sizeof(cursor));
+
unw_getcontext(&uc);
unw_init_local(&cursor, &uc);
while (unw_step(&cursor) > 0) {
@@ -300,8 +304,9 @@ void zlog_thread_info(int log_level)
if (tc)
zlog(log_level,
- "Current thread function %s, scheduled from file %s, line %u",
- tc->funcname, tc->schedfrom, tc->schedfrom_line);
+ "Current thread function %s, scheduled from file %s, line %u in %s()",
+ tc->xref->funcname, tc->xref->xref.file,
+ tc->xref->xref.line, tc->xref->xref.func);
else
zlog(log_level, "Current thread not known/applicable");
}
diff --git a/lib/log.h b/lib/log.h
index 3d2f0ed829..7147253644 100644
--- a/lib/log.h
+++ b/lib/log.h
@@ -62,16 +62,6 @@ struct message {
const char *str;
};
-/* For logs which have error codes associated with them */
-#define flog_err(ferr_id, format, ...) \
- zlog_err("[EC %u] " format, ferr_id, ##__VA_ARGS__)
-#define flog_err_sys(ferr_id, format, ...) \
- flog_err(ferr_id, format, ##__VA_ARGS__)
-#define flog_warn(ferr_id, format, ...) \
- zlog_warn("[EC %u] " format, ferr_id, ##__VA_ARGS__)
-#define flog(priority, ferr_id, format, ...) \
- zlog(priority, "[EC %u] " format, ferr_id, ##__VA_ARGS__)
-
extern void zlog_thread_info(int log_level);
#define ZLOG_FILTERS_MAX 100 /* Max # of filters at once */
diff --git a/lib/module.c b/lib/module.c
index 14d5cfd44f..3d299a6a2e 100644
--- a/lib/module.c
+++ b/lib/module.c
@@ -43,6 +43,8 @@ union _frrmod_runtime_u frrmod_default = {
},
};
+XREF_SETUP()
+
// if defined(HAVE_SYS_WEAK_ALIAS_ATTRIBUTE)
// union _frrmod_runtime_u _frrmod_this_module
// __attribute__((weak, alias("frrmod_default")));
diff --git a/lib/module.h b/lib/module.h
index 79cf52d75a..5d8d9cfbcc 100644
--- a/lib/module.h
+++ b/lib/module.h
@@ -20,6 +20,9 @@
#include <stdint.h>
#include <stdbool.h>
+#include "compiler.h"
+#include "xref.h"
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -75,7 +78,10 @@ extern union _frrmod_runtime_u _frrmod_this_module;
DSO_LOCAL union _frrmod_runtime_u _frrmod_this_module = {{ \
NULL, \
&_frrmod_info, \
- }};
+ }}; \
+ XREF_SETUP() \
+ /* end */
+
#define FRR_MODULE_SETUP(...) \
FRR_COREMOD_SETUP(__VA_ARGS__) \
DSO_SELF struct frrmod_runtime *frr_module = &_frrmod_this_module.r;
diff --git a/lib/network.c b/lib/network.c
index d2482bd55e..411661a5e1 100644
--- a/lib/network.c
+++ b/lib/network.c
@@ -121,21 +121,3 @@ float ntohf(float net)
{
return htonf(net);
}
-
-/**
- * Helper function that returns a random long value. The main purpose of
- * this function is to hide a `random()` call that gets flagged by coverity
- * scan and put it into one place.
- *
- * The main usage of this function should be for generating jitter or weak
- * random values for simple purposes.
- *
- * See 'man 3 random' for more information.
- *
- * \returns random long integer.
- */
-long frr_weak_random(void)
-{
- /* coverity[dont_call] */
- return random();
-}
diff --git a/lib/network.h b/lib/network.h
index 83c9e59e76..4a9666984f 100644
--- a/lib/network.h
+++ b/lib/network.h
@@ -45,7 +45,23 @@ extern int set_cloexec(int fd);
extern float htonf(float);
extern float ntohf(float);
-extern long frr_weak_random(void);
+/**
+ * Helper function that returns a random long value. The main purpose of
+ * this function is to hide a `random()` call that gets flagged by coverity
+ * scan and put it into one place.
+ *
+ * The main usage of this function should be for generating jitter or weak
+ * random values for simple purposes.
+ *
+ * See 'man 3 random' for more information.
+ *
+ * \returns random long integer.
+ */
+static inline long frr_weak_random(void)
+{
+ /* coverity[dont_call] */
+ return random();
+}
#ifdef __cplusplus
}
diff --git a/lib/northbound.h b/lib/northbound.h
index c37d66d580..8dd6b4c337 100644
--- a/lib/northbound.h
+++ b/lib/northbound.h
@@ -552,7 +552,7 @@ struct nb_node {
* from working properly on shared libraries. For those compilers, use a fixed
* size array to work around the problem.
*/
-#define YANG_MODULE_MAX_NODES 1400
+#define YANG_MODULE_MAX_NODES 2000
struct frr_yang_module_info {
/* YANG module name. */
diff --git a/lib/northbound_cli.c b/lib/northbound_cli.c
index 7048df99fb..ad7dad5cb2 100644
--- a/lib/northbound_cli.c
+++ b/lib/northbound_cli.c
@@ -693,6 +693,12 @@ static int nb_write_config(struct nb_config *config, enum nb_cfg_format format,
__func__, safe_strerror(errno));
return -1;
}
+ if (fchmod(fd, CONFIGFILE_MASK) != 0) {
+ flog_warn(EC_LIB_SYSTEM_CALL,
+ "%s: fchmod() failed: %s(%d):", __func__,
+ safe_strerror(errno), errno);
+ return -1;
+ }
/* Make vty for configuration file. */
file_vty = vty_new();
@@ -1820,20 +1826,20 @@ static struct cmd_node nb_debug_node = {
void nb_cli_install_default(int node)
{
- install_element(node, &show_config_candidate_section_cmd);
+ _install_element(node, &show_config_candidate_section_cmd);
if (frr_get_cli_mode() != FRR_CLI_TRANSACTIONAL)
return;
- install_element(node, &config_commit_cmd);
- install_element(node, &config_commit_comment_cmd);
- install_element(node, &config_commit_check_cmd);
- install_element(node, &config_update_cmd);
- install_element(node, &config_discard_cmd);
- install_element(node, &show_config_running_cmd);
- install_element(node, &show_config_candidate_cmd);
- install_element(node, &show_config_compare_cmd);
- install_element(node, &show_config_transaction_cmd);
+ _install_element(node, &config_commit_cmd);
+ _install_element(node, &config_commit_comment_cmd);
+ _install_element(node, &config_commit_check_cmd);
+ _install_element(node, &config_update_cmd);
+ _install_element(node, &config_discard_cmd);
+ _install_element(node, &show_config_running_cmd);
+ _install_element(node, &show_config_candidate_cmd);
+ _install_element(node, &show_config_compare_cmd);
+ _install_element(node, &show_config_transaction_cmd);
}
/* YANG module autocomplete. */
diff --git a/lib/printf/printf-pos.c b/lib/printf/printf-pos.c
index cc03f7ef9a..ac775bea4e 100644
--- a/lib/printf/printf-pos.c
+++ b/lib/printf/printf-pos.c
@@ -384,6 +384,7 @@ reswitch: switch (ch) {
goto error;
break;
#endif /* !NO_FLOATING_POINT */
+#ifdef DANGEROUS_PERCENT_N
case 'n':
if (flags & INTMAXT)
error = addtype(&types, TP_INTMAXT);
@@ -404,6 +405,7 @@ reswitch: switch (ch) {
if (error)
goto error;
continue; /* no output */
+#endif
case 'O':
flags |= LONGINT;
/*FALLTHROUGH*/
@@ -576,6 +578,7 @@ reswitch: switch (ch) {
goto error;
break;
#endif /* !NO_FLOATING_POINT */
+#ifdef DANGEROUS_PERCENT_N
case 'n':
if (flags & INTMAXT)
error = addtype(&types, TP_INTMAXT);
@@ -596,6 +599,7 @@ reswitch: switch (ch) {
if (error)
goto error;
continue; /* no output */
+#endif
case 'O':
flags |= LONGINT;
/*FALLTHROUGH*/
diff --git a/lib/printf/vfprintf.c b/lib/printf/vfprintf.c
index 6ffccb3811..a0634cde4b 100644
--- a/lib/printf/vfprintf.c
+++ b/lib/printf/vfprintf.c
@@ -503,6 +503,11 @@ reswitch: switch (ch) {
size = (prec >= 0) ? strnlen(cp, prec) : strlen(cp);
sign = '\0';
break;
+#ifdef DANGEROUS_PERCENT_N
+ /* FRR does not use %n in printf formats. This is just left
+ * here in case someone tries to use %n and starts debugging
+ * why the f* it doesn't work
+ */
case 'n':
/*
* Assignment-like behavior is specified if the
@@ -526,6 +531,7 @@ reswitch: switch (ch) {
else
*GETARG(int *) = ret;
continue; /* no output */
+#endif
case 'O':
flags |= LONGINT;
/*FALLTHROUGH*/
diff --git a/lib/privs.c b/lib/privs.c
index 1bb5d059c8..5ca3c0d886 100644
--- a/lib/privs.c
+++ b/lib/privs.c
@@ -587,6 +587,8 @@ void zprivs_preinit(struct zebra_privs_t *zprivs)
}
}
+struct zebra_privs_t *lib_privs;
+
void zprivs_init(struct zebra_privs_t *zprivs)
{
gid_t groups[NGROUPS_MAX] = {};
@@ -598,6 +600,8 @@ void zprivs_init(struct zebra_privs_t *zprivs)
|| zprivs->cap_num_i))
return;
+ lib_privs = zprivs;
+
if (zprivs->user) {
ngroups = array_size(groups);
if (getgrouplist(zprivs->user, zprivs_state.zgid, groups,
@@ -701,6 +705,8 @@ void zprivs_terminate(struct zebra_privs_t *zprivs)
{
struct zebra_privs_refs_t *refs;
+ lib_privs = NULL;
+
if (!zprivs) {
fprintf(stderr, "%s: no privs struct given, terminating",
__func__);
diff --git a/lib/privs.h b/lib/privs.h
index 18ba8e8888..2dcdbe2e6c 100644
--- a/lib/privs.h
+++ b/lib/privs.h
@@ -100,6 +100,8 @@ struct zprivs_ids_t {
gid_t gid_vty; /* vty gid */
};
+extern struct zebra_privs_t *lib_privs;
+
/* initialise zebra privileges */
extern void zprivs_preinit(struct zebra_privs_t *zprivs);
extern void zprivs_init(struct zebra_privs_t *zprivs);
diff --git a/lib/resolver.c b/lib/resolver.c
index e5caadb2d0..c01284e29e 100644
--- a/lib/resolver.c
+++ b/lib/resolver.c
@@ -19,6 +19,9 @@
#include "lib_errors.h"
#include "resolver.h"
#include "command.h"
+#include "xref.h"
+
+XREF_SETUP()
struct resolver_state {
ares_channel channel;
diff --git a/lib/route_types.pl b/lib/route_types.pl
index 759e9b4729..8c216e13bd 100755
--- a/lib/route_types.pl
+++ b/lib/route_types.pl
@@ -130,7 +130,7 @@ sub codelist {
$str =~ s/ $//;
push @lines, $str . "\\n\" \\\n";
push @lines, " \" > - selected route, * - FIB route, q - queued, r - rejected, b - backup\\n\"";
- push @lines, " \" t - trapped, o - offload failure\\n\"";
+ push @lines, " \" t - trapped, o - offload failure\\n\\n\"";
return join("", @lines);
diff --git a/lib/routemap.c b/lib/routemap.c
index 1c2f43d968..7714086672 100644
--- a/lib/routemap.c
+++ b/lib/routemap.c
@@ -2240,7 +2240,7 @@ static void route_map_pentry_update(route_map_event_t event,
}
}
-static void route_map_pentry_process_dependency(struct hash_bucket *backet,
+static void route_map_pentry_process_dependency(struct hash_bucket *bucket,
void *data)
{
char *rmap_name = NULL;
@@ -2253,7 +2253,7 @@ static void route_map_pentry_process_dependency(struct hash_bucket *backet,
(struct route_map_pentry_dep *)data;
unsigned char family = pentry_dep->pentry->prefix.family;
- dep_data = (struct route_map_dep_data *)backet->data;
+ dep_data = (struct route_map_dep_data *)bucket->data;
if (!dep_data)
return;
@@ -2399,6 +2399,7 @@ route_map_result_t route_map_apply(struct route_map *map,
index = route_map_get_index(map, prefix, object,
(uint8_t *)&match_ret);
if (index) {
+ index->applied++;
if (rmap_debug)
zlog_debug(
"Best match route-map: %s, sequence: %d for pfx: %pFX, result: %s",
diff --git a/lib/sigevent.c b/lib/sigevent.c
index 8d583096f6..64cec1385d 100644
--- a/lib/sigevent.c
+++ b/lib/sigevent.c
@@ -258,7 +258,7 @@ core_handler(int signo, siginfo_t *siginfo, void *context)
static void trap_default_signals(void)
{
static const int core_signals[] = {
- SIGQUIT, SIGILL,
+ SIGQUIT, SIGILL, SIGABRT,
#ifdef SIGEMT
SIGEMT,
#endif
diff --git a/lib/smux.h b/lib/smux.h
index 6896f02354..e07df2369f 100644
--- a/lib/smux.h
+++ b/lib/smux.h
@@ -44,6 +44,29 @@ extern "C" {
#define IN_ADDR_SIZE sizeof(struct in_addr)
+/* IANAipRouteProtocol */
+#define IANAIPROUTEPROTOCOLOTHER 1
+#define IANAIPROUTEPROTOCOLLOCAL 2
+#define IANAIPROUTEPROTOCOLNETMGMT 3
+#define IANAIPROUTEPROTOCOLICMP 4
+#define IANAIPROUTEPROTOCOLEGP 5
+#define IANAIPROUTEPROTOCOLGGP 6
+#define IANAIPROUTEPROTOCOLHELLO 7
+#define IANAIPROUTEPROTOCOLRIP 8
+#define IANAIPROUTEPROTOCOLISIS 9
+#define IANAIPROUTEPROTOCOLESIS 10
+#define IANAIPROUTEPROTOCOLCISCOIGRP 11
+#define IANAIPROUTEPROTOCOLBBNSPFIGP 12
+#define IANAIPROUTEPROTOCOLOSPF 13
+#define IANAIPROUTEPROTOCOLBGP 14
+#define IANAIPROUTEPROTOCOLIDPR 15
+#define IANAIPROUTEPROTOCOLCISCOEIGRP 16
+#define IANAIPROUTEPROTOCOLDVMRP 17
+
+#define INETADDRESSTYPEUNKNOWN 0
+#define INETADDRESSTYPEIPV4 1
+#define INETADDRESSTYPEIPV6 2
+
#undef REGISTER_MIB
#define REGISTER_MIB(descr, var, vartype, theoid) \
smux_register_mib(descr, (struct variable *)var, \
@@ -56,19 +79,29 @@ struct trap_object {
oid name[MAX_OID_LEN];
};
+struct index_oid {
+ int indexlen;
+ oid indexname[MAX_OID_LEN];
+};
/* Declare SMUX return value. */
#define SNMP_LOCAL_VARIABLES \
static long snmp_int_val __attribute__((unused)); \
-	static struct in_addr snmp_in_addr_val __attribute__((unused));
-
+	static struct in_addr snmp_in_addr_val __attribute__((unused));        \
+	static uint8_t snmp_octet_val __attribute__((unused));
#define SNMP_INTEGER(V) \
(*var_len = sizeof(snmp_int_val), snmp_int_val = V, \
(uint8_t *)&snmp_int_val)
+#define SNMP_OCTET(V) \
+ (*var_len = sizeof(snmp_octet_val), snmp_octet_val = V, \
+ (uint8_t *)&snmp_octet_val)
+
#define SNMP_IPADDRESS(V) \
(*var_len = sizeof(struct in_addr), snmp_in_addr_val = V, \
(uint8_t *)&snmp_in_addr_val)
+#define SNMP_IP6ADDRESS(V) (*var_len = sizeof(struct in6_addr), (uint8_t *)&(V))
+
extern void smux_init(struct thread_master *tm);
extern void smux_register_mib(const char *, struct variable *, size_t, int,
oid[], size_t);
@@ -98,14 +131,24 @@ extern int smux_header_table(struct variable *, oid *, size_t *, int, size_t *,
The use of the arguments may differ depending on the implementation
used.
*/
-extern int smux_trap(struct variable *, size_t, const oid *, size_t,
- const oid *, size_t, const oid *, size_t,
- const struct trap_object *, size_t, uint8_t);
-
+extern void smux_trap(struct variable *, size_t, const oid *, size_t,
+ const oid *, size_t, const oid *, size_t,
+ const struct trap_object *, size_t, uint8_t);
+
+extern int smux_trap_multi_index(struct variable *vp, size_t vp_len,
+ const oid *ename, size_t enamelen,
+ const oid *name, size_t namelen,
+ struct index_oid *iname, size_t index_len,
+ const struct trap_object *trapobj,
+ size_t trapobjlen, uint8_t sptrap);
extern int oid_compare(const oid *, int, const oid *, int);
extern void oid2in_addr(oid[], int, struct in_addr *);
+extern void oid2int(oid oid[], int *dest);
extern void *oid_copy(void *, const void *, size_t);
extern void oid_copy_addr(oid[], const struct in_addr *, int);
+extern void oid_copy_int(oid oid[], int *val);
+extern void oid2string(oid oid[], int len, char *string);
+extern void oid_copy_str(oid oid[], const char *string, int len);
#ifdef __cplusplus
}
diff --git a/lib/snmp.c b/lib/snmp.c
index 736a3c62b8..e92f622bb9 100644
--- a/lib/snmp.c
+++ b/lib/snmp.c
@@ -64,6 +64,19 @@ void oid2in_addr(oid oid[], int len, struct in_addr *addr)
*pnt++ = oid[i];
}
+void oid2int(oid oid[], int *dest)
+{
+ uint8_t i;
+ uint8_t *pnt;
+ int network_dest;
+
+ pnt = (uint8_t *)&network_dest;
+
+ for (i = 0; i < sizeof(int); i++)
+ *pnt++ = oid[i];
+ *dest = ntohl(network_dest);
+}
+
void oid_copy_addr(oid oid[], const struct in_addr *addr, int len)
{
int i;
@@ -78,6 +91,47 @@ void oid_copy_addr(oid oid[], const struct in_addr *addr, int len)
oid[i] = *pnt++;
}
+void oid_copy_int(oid oid[], int *val)
+{
+ uint8_t i;
+ const uint8_t *pnt;
+ int network_val;
+
+ network_val = htonl(*val);
+ pnt = (uint8_t *)&network_val;
+
+ for (i = 0; i < sizeof(int); i++)
+ oid[i] = *pnt++;
+}
+
+void oid2string(oid oid[], int len, char *string)
+{
+ int i;
+ uint8_t *pnt;
+
+ if (len == 0)
+ return;
+
+ pnt = (uint8_t *)string;
+
+ for (i = 0; i < len; i++)
+ *pnt++ = (uint8_t)oid[i];
+}
+
+void oid_copy_str(oid oid[], const char *string, int len)
+{
+ int i;
+ const uint8_t *pnt;
+
+ if (len == 0)
+ return;
+
+ pnt = (uint8_t *)string;
+
+ for (i = 0; i < len; i++)
+ oid[i] = *pnt++;
+}
+
int smux_header_generic(struct variable *v, oid *name, size_t *length,
int exact, size_t *var_len, WriteMethod **write_method)
{
diff --git a/lib/stream.c b/lib/stream.c
index e4e37b7315..ef73c2fdc9 100644
--- a/lib/stream.c
+++ b/lib/stream.c
@@ -57,7 +57,7 @@ DEFINE_MTYPE_STATIC(LIB, STREAM_FIFO, "Stream FIFO")
#define STREAM_WARN_OFFSETS(S) \
do { \
flog_warn(EC_LIB_STREAM, \
- "&(struct stream): %p, size: %lu, getp: %lu, endp: %lu\n", \
+ "&(struct stream): %p, size: %lu, getp: %lu, endp: %lu", \
(void *)(S), (unsigned long)(S)->size, \
(unsigned long)(S)->getp, (unsigned long)(S)->endp); \
zlog_backtrace(LOG_WARNING); \
@@ -93,7 +93,7 @@ DEFINE_MTYPE_STATIC(LIB, STREAM_FIFO, "Stream FIFO")
if (((S)->endp + (Z)) > (S)->size) { \
flog_warn( \
EC_LIB_STREAM, \
- "CHECK_SIZE: truncating requested size %lu\n", \
+ "CHECK_SIZE: truncating requested size %lu", \
(unsigned long)(Z)); \
STREAM_WARN_OFFSETS(S); \
(Z) = (S)->size - (S)->endp; \
diff --git a/lib/subdir.am b/lib/subdir.am
index ee9e827ee8..d5ffa08546 100644
--- a/lib/subdir.am
+++ b/lib/subdir.am
@@ -26,6 +26,7 @@ lib_libfrr_la_SOURCES = \
lib/filter_nb.c \
lib/frrcu.c \
lib/frrlua.c \
+ lib/frrscript.c \
lib/frr_pthread.c \
lib/frrstr.c \
lib/getopt.c \
@@ -102,6 +103,7 @@ lib_libfrr_la_SOURCES = \
lib/vty.c \
lib/wheel.c \
lib/workqueue.c \
+ lib/xref.c \
lib/yang.c \
lib/yang_translator.c \
lib/yang_wrappers.c \
@@ -185,6 +187,7 @@ pkginclude_HEADERS += \
lib/filter.h \
lib/freebsd-queue.h \
lib/frrlua.h \
+ lib/frrscript.h \
lib/frr_pthread.h \
lib/frratomic.h \
lib/frrcu.h \
@@ -266,6 +269,7 @@ pkginclude_HEADERS += \
lib/vxlan.h \
lib/wheel.h \
lib/workqueue.h \
+ lib/xref.h \
lib/yang.h \
lib/yang_translator.h \
lib/yang_wrappers.h \
@@ -409,6 +413,7 @@ lib_clippy_CFLAGS = $(PYTHON_CFLAGS)
lib_clippy_LDADD = $(PYTHON_LIBS) $(UST_LIBS)
lib_clippy_LDFLAGS = -export-dynamic
lib_clippy_SOURCES = \
+ lib/jhash.c \
lib/clippy.c \
lib/command_graph.c \
lib/command_lex.l \
diff --git a/lib/thread.c b/lib/thread.c
index c886058355..e0d734a951 100644
--- a/lib/thread.c
+++ b/lib/thread.c
@@ -119,7 +119,7 @@ static void vty_out_cpu_thread_history(struct vty *vty,
a->total_active, a->cpu.total / 1000, a->cpu.total % 1000,
a->total_calls, (a->cpu.total / a->total_calls), a->cpu.max,
(a->real.total / a->total_calls), a->real.max);
- vty_out(vty, " %c%c%c%c%c %s\n",
+ vty_out(vty, " %c%c%c%c%c %s\n",
a->types & (1 << THREAD_READ) ? 'R' : ' ',
a->types & (1 << THREAD_WRITE) ? 'W' : ' ',
a->types & (1 << THREAD_TIMER) ? 'T' : ' ',
@@ -188,7 +188,7 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
name);
vty_out(vty, "-------------------------------%s\n",
underline);
- vty_out(vty, "%21s %18s %18s\n", "",
+ vty_out(vty, "%30s %18s %18s\n", "",
"CPU (user+system):", "Real (wall-clock):");
vty_out(vty,
"Active Runtime(ms) Invoked Avg uSec Max uSecs");
@@ -211,7 +211,7 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
vty_out(vty, "\n");
vty_out(vty, "Total thread statistics\n");
vty_out(vty, "-------------------------\n");
- vty_out(vty, "%21s %18s %18s\n", "",
+ vty_out(vty, "%30s %18s %18s\n", "",
"CPU (user+system):", "Real (wall-clock):");
vty_out(vty, "Active Runtime(ms) Invoked Avg uSec Max uSecs");
vty_out(vty, " Avg uSec Max uSecs");
@@ -342,7 +342,7 @@ static void show_thread_poll_helper(struct vty *vty, struct thread_master *m)
if (!thread)
vty_out(vty, "ERROR ");
else
- vty_out(vty, "%s ", thread->funcname);
+ vty_out(vty, "%s ", thread->xref->funcname);
} else
vty_out(vty, " ");
@@ -352,7 +352,7 @@ static void show_thread_poll_helper(struct vty *vty, struct thread_master *m)
if (!thread)
vty_out(vty, "ERROR\n");
else
- vty_out(vty, "%s\n", thread->funcname);
+ vty_out(vty, "%s\n", thread->xref->funcname);
} else
vty_out(vty, "\n");
}
@@ -633,9 +633,6 @@ unsigned long thread_timer_remain_second(struct thread *thread)
return thread_timer_remain_msec(thread) / 1000LL;
}
-#define debugargdef const char *funcname, const char *schedfrom, int fromln
-#define debugargpass funcname, schedfrom, fromln
-
struct timeval thread_timer_remain(struct thread *thread)
{
struct timeval remain;
@@ -678,7 +675,7 @@ char *thread_timer_to_hhmmss(char *buf, int buf_size,
/* Get new thread. */
static struct thread *thread_get(struct thread_master *m, uint8_t type,
int (*func)(struct thread *), void *arg,
- debugargdef)
+ const struct xref_threadsched *xref)
{
struct thread *thread = thread_list_pop(&m->unuse);
struct cpu_thread_history tmp;
@@ -707,18 +704,17 @@ static struct thread *thread_get(struct thread_master *m, uint8_t type,
* This hopefully saves us some serious
* hash_get lookups.
*/
- if (thread->funcname != funcname || thread->func != func) {
+ if ((thread->xref && thread->xref->funcname != xref->funcname)
+ || thread->func != func) {
tmp.func = func;
- tmp.funcname = funcname;
+ tmp.funcname = xref->funcname;
thread->hist =
hash_get(m->cpu_record, &tmp,
(void *(*)(void *))cpu_record_hash_alloc);
}
thread->hist->total_active++;
thread->func = func;
- thread->funcname = funcname;
- thread->schedfrom = schedfrom;
- thread->schedfrom_line = fromln;
+ thread->xref = xref;
return thread;
}
@@ -832,21 +828,23 @@ done:
}
/* Add new read thread. */
-struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m,
- int (*func)(struct thread *),
- void *arg, int fd,
- struct thread **t_ptr,
- debugargdef)
+struct thread *_thread_add_read_write(const struct xref_threadsched *xref,
+ struct thread_master *m,
+ int (*func)(struct thread *),
+ void *arg, int fd, struct thread **t_ptr)
{
+ int dir = xref->thread_type;
struct thread *thread = NULL;
struct thread **thread_array;
if (dir == THREAD_READ)
- frrtrace(9, frr_libfrr, schedule_read, m, funcname, schedfrom,
- fromln, t_ptr, fd, 0, arg, 0);
+ frrtrace(9, frr_libfrr, schedule_read, m,
+ xref->funcname, xref->xref.file, xref->xref.line,
+ t_ptr, fd, 0, arg, 0);
else
- frrtrace(9, frr_libfrr, schedule_write, m, funcname, schedfrom,
- fromln, t_ptr, fd, 0, arg, 0);
+ frrtrace(9, frr_libfrr, schedule_write, m,
+ xref->funcname, xref->xref.file, xref->xref.line,
+ t_ptr, fd, 0, arg, 0);
assert(fd >= 0 && fd < m->fd_limit);
frr_with_mutex(&m->mtx) {
@@ -882,7 +880,7 @@ struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m,
/* make sure we have room for this fd + pipe poker fd */
assert(queuepos + 1 < m->handler.pfdsize);
- thread = thread_get(m, dir, func, arg, debugargpass);
+ thread = thread_get(m, dir, func, arg, xref);
m->handler.pfds[queuepos].fd = fd;
m->handler.pfds[queuepos].events |=
@@ -910,10 +908,10 @@ struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m,
}
static struct thread *
-funcname_thread_add_timer_timeval(struct thread_master *m,
- int (*func)(struct thread *), int type,
- void *arg, struct timeval *time_relative,
- struct thread **t_ptr, debugargdef)
+_thread_add_timer_timeval(const struct xref_threadsched *xref,
+ struct thread_master *m, int (*func)(struct thread *),
+ int type, void *arg, struct timeval *time_relative,
+ struct thread **t_ptr)
{
struct thread *thread;
@@ -922,7 +920,8 @@ funcname_thread_add_timer_timeval(struct thread_master *m,
assert(type == THREAD_TIMER);
assert(time_relative);
- frrtrace(9, frr_libfrr, schedule_timer, m, funcname, schedfrom, fromln,
+ frrtrace(9, frr_libfrr, schedule_timer, m,
+ xref->funcname, xref->xref.file, xref->xref.line,
t_ptr, 0, 0, arg, (long)time_relative->tv_sec);
frr_with_mutex(&m->mtx) {
@@ -930,7 +929,7 @@ funcname_thread_add_timer_timeval(struct thread_master *m,
/* thread is already scheduled; don't reschedule */
return NULL;
- thread = thread_get(m, type, func, arg, debugargpass);
+ thread = thread_get(m, type, func, arg, xref);
frr_with_mutex(&thread->mtx) {
monotime(&thread->u.sands);
@@ -951,10 +950,10 @@ funcname_thread_add_timer_timeval(struct thread_master *m,
/* Add timer event thread. */
-struct thread *funcname_thread_add_timer(struct thread_master *m,
- int (*func)(struct thread *),
- void *arg, long timer,
- struct thread **t_ptr, debugargdef)
+struct thread *_thread_add_timer(const struct xref_threadsched *xref,
+ struct thread_master *m,
+ int (*func)(struct thread *),
+ void *arg, long timer, struct thread **t_ptr)
{
struct timeval trel;
@@ -963,16 +962,16 @@ struct thread *funcname_thread_add_timer(struct thread_master *m,
trel.tv_sec = timer;
trel.tv_usec = 0;
- return funcname_thread_add_timer_timeval(m, func, THREAD_TIMER, arg,
- &trel, t_ptr, debugargpass);
+ return _thread_add_timer_timeval(xref, m, func, THREAD_TIMER, arg,
+ &trel, t_ptr);
}
/* Add timer event thread with "millisecond" resolution */
-struct thread *funcname_thread_add_timer_msec(struct thread_master *m,
- int (*func)(struct thread *),
- void *arg, long timer,
- struct thread **t_ptr,
- debugargdef)
+struct thread *_thread_add_timer_msec(const struct xref_threadsched *xref,
+ struct thread_master *m,
+ int (*func)(struct thread *),
+ void *arg, long timer,
+ struct thread **t_ptr)
{
struct timeval trel;
@@ -981,29 +980,31 @@ struct thread *funcname_thread_add_timer_msec(struct thread_master *m,
trel.tv_sec = timer / 1000;
trel.tv_usec = 1000 * (timer % 1000);
- return funcname_thread_add_timer_timeval(m, func, THREAD_TIMER, arg,
- &trel, t_ptr, debugargpass);
+ return _thread_add_timer_timeval(xref, m, func, THREAD_TIMER, arg,
+ &trel, t_ptr);
}
/* Add timer event thread with "millisecond" resolution */
-struct thread *funcname_thread_add_timer_tv(struct thread_master *m,
- int (*func)(struct thread *),
- void *arg, struct timeval *tv,
- struct thread **t_ptr, debugargdef)
+struct thread *_thread_add_timer_tv(const struct xref_threadsched *xref,
+ struct thread_master *m,
+ int (*func)(struct thread *),
+ void *arg, struct timeval *tv,
+ struct thread **t_ptr)
{
- return funcname_thread_add_timer_timeval(m, func, THREAD_TIMER, arg, tv,
- t_ptr, debugargpass);
+ return _thread_add_timer_timeval(xref, m, func, THREAD_TIMER, arg, tv,
+ t_ptr);
}
/* Add simple event thread. */
-struct thread *funcname_thread_add_event(struct thread_master *m,
- int (*func)(struct thread *),
- void *arg, int val,
- struct thread **t_ptr, debugargdef)
+struct thread *_thread_add_event(const struct xref_threadsched *xref,
+ struct thread_master *m,
+ int (*func)(struct thread *),
+ void *arg, int val, struct thread **t_ptr)
{
struct thread *thread = NULL;
- frrtrace(9, frr_libfrr, schedule_event, m, funcname, schedfrom, fromln,
+ frrtrace(9, frr_libfrr, schedule_event, m,
+ xref->funcname, xref->xref.file, xref->xref.line,
t_ptr, 0, val, arg, 0);
assert(m != NULL);
@@ -1013,7 +1014,7 @@ struct thread *funcname_thread_add_event(struct thread_master *m,
/* thread is already scheduled; don't reschedule */
break;
- thread = thread_get(m, THREAD_EVENT, func, arg, debugargpass);
+ thread = thread_get(m, THREAD_EVENT, func, arg, xref);
frr_with_mutex(&thread->mtx) {
thread->u.val = val;
thread_list_add_tail(&m->event, thread);
@@ -1239,8 +1240,9 @@ void thread_cancel(struct thread **thread)
master = (*thread)->master;
- frrtrace(9, frr_libfrr, thread_cancel, master, (*thread)->funcname,
- (*thread)->schedfrom, (*thread)->schedfrom_line, NULL, (*thread)->u.fd,
+ frrtrace(9, frr_libfrr, thread_cancel, master,
+ (*thread)->xref->funcname, (*thread)->xref->xref.file,
+ (*thread)->xref->xref.line, NULL, (*thread)->u.fd,
(*thread)->u.val, (*thread)->arg, (*thread)->u.sands.tv_sec);
assert(master->owner == pthread_self());
@@ -1287,8 +1289,8 @@ void thread_cancel_async(struct thread_master *master, struct thread **thread,
if (thread && *thread)
frrtrace(9, frr_libfrr, thread_cancel_async, master,
- (*thread)->funcname, (*thread)->schedfrom,
- (*thread)->schedfrom_line, NULL, (*thread)->u.fd,
+ (*thread)->xref->funcname, (*thread)->xref->xref.file,
+ (*thread)->xref->xref.line, NULL, (*thread)->u.fd,
(*thread)->u.val, (*thread)->arg,
(*thread)->u.sands.tv_sec);
else
@@ -1363,7 +1365,7 @@ static int thread_process_io_helper(struct thread_master *m,
if (!thread) {
if ((actual_state & (POLLHUP|POLLIN)) != POLLHUP)
flog_err(EC_LIB_NO_THREAD,
- "Attempting to process an I/O event but for fd: %d(%d) no thread to handle this!\n",
+ "Attempting to process an I/O event but for fd: %d(%d) no thread to handle this!",
m->handler.pfds[pos].fd, actual_state);
return 0;
}
@@ -1673,8 +1675,9 @@ void thread_call(struct thread *thread)
GETRUSAGE(&before);
thread->real = before.real;
- frrtrace(9, frr_libfrr, thread_call, thread->master, thread->funcname,
- thread->schedfrom, thread->schedfrom_line, NULL, thread->u.fd,
+ frrtrace(9, frr_libfrr, thread_call, thread->master,
+ thread->xref->funcname, thread->xref->xref.file,
+ thread->xref->xref.line, NULL, thread->u.fd,
thread->u.val, thread->arg, thread->u.sands.tv_sec);
pthread_setspecific(thread_current, thread);
@@ -1724,7 +1727,7 @@ void thread_call(struct thread *thread)
flog_warn(
EC_LIB_SLOW_THREAD,
"SLOW THREAD: task %s (%lx) ran for %lums (cpu time %lums)",
- thread->funcname, (unsigned long)thread->func,
+ thread->xref->funcname, (unsigned long)thread->func,
realtime / 1000, cputime / 1000);
}
#endif /* CONSUMED_TIME_CHECK */
@@ -1732,15 +1735,15 @@ void thread_call(struct thread *thread)
}
/* Execute thread */
-void funcname_thread_execute(struct thread_master *m,
- int (*func)(struct thread *), void *arg, int val,
- debugargdef)
+void _thread_execute(const struct xref_threadsched *xref,
+ struct thread_master *m, int (*func)(struct thread *),
+ void *arg, int val)
{
struct thread *thread;
/* Get or allocate new thread to execute. */
frr_with_mutex(&m->mtx) {
- thread = thread_get(m, THREAD_EVENT, func, arg, debugargpass);
+ thread = thread_get(m, THREAD_EVENT, func, arg, xref);
/* Set its event value. */
frr_with_mutex(&thread->mtx) {
diff --git a/lib/thread.h b/lib/thread.h
index eb1b107e7b..6b510fc4c9 100644
--- a/lib/thread.h
+++ b/lib/thread.h
@@ -27,6 +27,7 @@
#include "monotime.h"
#include "frratomic.h"
#include "typesafe.h"
+#include "xref.h"
#ifdef __cplusplus
extern "C" {
@@ -66,6 +67,14 @@ struct cancel_req {
struct thread **threadref;
};
+struct xref_threadsched {
+ struct xref xref;
+
+ const char *funcname;
+ const char *dest;
+ uint32_t thread_type;
+};
+
/* Master of the theads. */
struct thread_master {
char *name;
@@ -107,9 +116,7 @@ struct thread {
struct timeval real;
struct cpu_thread_history *hist; /* cache pointer to cpu_history */
unsigned long yield; /* yield time in microseconds */
- const char *funcname; /* name of thread function */
- const char *schedfrom; /* source file thread was scheduled from */
- int schedfrom_line; /* line number of source file */
+ const struct xref_threadsched *xref; /* origin location */
pthread_mutex_t mtx; /* mutex for thread.c functions */
};
@@ -156,17 +163,45 @@ struct cpu_thread_history {
thread_cancel(&(thread)); \
} while (0)
-#define debugargdef const char *funcname, const char *schedfrom, int fromln
-
-#define thread_add_read(m,f,a,v,t) funcname_thread_add_read_write(THREAD_READ,m,f,a,v,t,#f,__FILE__,__LINE__)
-#define thread_add_write(m,f,a,v,t) funcname_thread_add_read_write(THREAD_WRITE,m,f,a,v,t,#f,__FILE__,__LINE__)
-#define thread_add_timer(m,f,a,v,t) funcname_thread_add_timer(m,f,a,v,t,#f,__FILE__,__LINE__)
-#define thread_add_timer_msec(m,f,a,v,t) funcname_thread_add_timer_msec(m,f,a,v,t,#f,__FILE__,__LINE__)
-#define thread_add_timer_tv(m,f,a,v,t) funcname_thread_add_timer_tv(m,f,a,v,t,#f,__FILE__,__LINE__)
-#define thread_add_event(m,f,a,v,t) funcname_thread_add_event(m,f,a,v,t,#f,__FILE__,__LINE__)
-#define thread_execute(m,f,a,v) funcname_thread_execute(m,f,a,v,#f,__FILE__,__LINE__)
-#define thread_execute_name(m, f, a, v, n) \
- funcname_thread_execute(m, f, a, v, n, __FILE__, __LINE__)
+/*
+ * Macro wrappers to generate xrefs for all thread add calls. Includes
+ * file/line/function info for debugging/tracing.
+ */
+#include "lib/xref.h"
+
+#define _xref_t_a(addfn, type, m, f, a, v, t) \
+ ({ \
+ static const struct xref_threadsched _xref \
+ __attribute__((used)) = { \
+ .xref = XREF_INIT(XREFT_THREADSCHED, NULL, __func__), \
+ .funcname = #f, \
+ .dest = #t, \
+ .thread_type = THREAD_ ## type, \
+ }; \
+ XREF_LINK(_xref.xref); \
+ _thread_add_ ## addfn(&_xref, m, f, a, v, t); \
+ }) \
+ /* end */
+
+#define thread_add_read(m,f,a,v,t) _xref_t_a(read_write, READ, m,f,a,v,t)
+#define thread_add_write(m,f,a,v,t) _xref_t_a(read_write, WRITE, m,f,a,v,t)
+#define thread_add_timer(m,f,a,v,t) _xref_t_a(timer, TIMER, m,f,a,v,t)
+#define thread_add_timer_msec(m,f,a,v,t) _xref_t_a(timer_msec, TIMER, m,f,a,v,t)
+#define thread_add_timer_tv(m,f,a,v,t) _xref_t_a(timer_tv, TIMER, m,f,a,v,t)
+#define thread_add_event(m,f,a,v,t) _xref_t_a(event, EVENT, m,f,a,v,t)
+
+#define thread_execute(m,f,a,v) \
+ ({ \
+ static const struct xref_threadsched _xref \
+ __attribute__((used)) = { \
+ .xref = XREF_INIT(XREFT_THREADSCHED, NULL, __func__), \
+ .funcname = #f, \
+ .dest = NULL, \
+ .thread_type = THREAD_EXECUTE, \
+ }; \
+ XREF_LINK(_xref.xref); \
+ _thread_execute(&_xref, m, f, a, v); \
+ }) /* end */
/* Prototypes. */
extern struct thread_master *thread_master_create(const char *);
@@ -174,35 +209,30 @@ void thread_master_set_name(struct thread_master *master, const char *name);
extern void thread_master_free(struct thread_master *);
extern void thread_master_free_unused(struct thread_master *);
-extern struct thread *
-funcname_thread_add_read_write(int dir, struct thread_master *,
- int (*)(struct thread *), void *, int,
- struct thread **, debugargdef);
-
-extern struct thread *funcname_thread_add_timer(struct thread_master *,
- int (*)(struct thread *),
- void *, long, struct thread **,
- debugargdef);
-
-extern struct thread *
-funcname_thread_add_timer_msec(struct thread_master *, int (*)(struct thread *),
- void *, long, struct thread **, debugargdef);
-
-extern struct thread *funcname_thread_add_timer_tv(struct thread_master *,
- int (*)(struct thread *),
- void *, struct timeval *,
- struct thread **,
- debugargdef);
-
-extern struct thread *funcname_thread_add_event(struct thread_master *,
- int (*)(struct thread *),
- void *, int, struct thread **,
- debugargdef);
-
-extern void funcname_thread_execute(struct thread_master *,
- int (*)(struct thread *), void *, int,
- debugargdef);
-#undef debugargdef
+extern struct thread *_thread_add_read_write(
+ const struct xref_threadsched *xref, struct thread_master *master,
+ int (*fn)(struct thread *), void *arg, int fd, struct thread **tref);
+
+extern struct thread *_thread_add_timer(
+ const struct xref_threadsched *xref, struct thread_master *master,
+ int (*fn)(struct thread *), void *arg, long t, struct thread **tref);
+
+extern struct thread *_thread_add_timer_msec(
+ const struct xref_threadsched *xref, struct thread_master *master,
+ int (*fn)(struct thread *), void *arg, long t, struct thread **tref);
+
+extern struct thread *_thread_add_timer_tv(
+ const struct xref_threadsched *xref, struct thread_master *master,
+ int (*fn)(struct thread *), void *arg, struct timeval *tv,
+ struct thread **tref);
+
+extern struct thread *_thread_add_event(
+ const struct xref_threadsched *xref, struct thread_master *master,
+ int (*fn)(struct thread *), void *arg, int val, struct thread **tref);
+
+extern void _thread_execute(const struct xref_threadsched *xref,
+ struct thread_master *master,
+ int (*fn)(struct thread *), void *arg, int val);
extern void thread_cancel(struct thread **event);
extern void thread_cancel_async(struct thread_master *, struct thread **,
diff --git a/lib/vrf.c b/lib/vrf.c
index 1a9cd7e451..136938783f 100644
--- a/lib/vrf.c
+++ b/lib/vrf.c
@@ -214,6 +214,53 @@ struct vrf *vrf_get(vrf_id_t vrf_id, const char *name)
return vrf;
}
+/* Update a VRF. If not found, create one.
+ * Arg:
+ * name - The name of the vrf.
+ * vrf_id - The vrf_id of the vrf.
+ * Description: This function first finds the vrf using its name. If the vrf is
+ * found and the vrf-id of the existing vrf does not match the new vrf id, it
+ * will disable the existing vrf and update it with new vrf-id. If the vrf is
+ * not found, it will create the vrf with given name and the new vrf id.
+ */
+struct vrf *vrf_update(vrf_id_t new_vrf_id, const char *name)
+{
+ struct vrf *vrf = NULL;
+
+ /*Treat VRF add for existing vrf as update
+ * Update VRF ID and also update in VRF ID table
+ */
+ if (name)
+ vrf = vrf_lookup_by_name(name);
+ if (vrf && new_vrf_id != VRF_UNKNOWN && vrf->vrf_id != VRF_UNKNOWN
+ && vrf->vrf_id != new_vrf_id) {
+ if (debug_vrf) {
+ zlog_debug(
+ "Vrf Update event: %s old id: %u, new id: %u",
+ name, vrf->vrf_id, new_vrf_id);
+ }
+
+ /*Disable the vrf to simulate implicit delete
+ * so that all stale routes are deleted
+ * This vrf will be enabled down the line
+ */
+ vrf_disable(vrf);
+
+
+ RB_REMOVE(vrf_id_head, &vrfs_by_id, vrf);
+ vrf->vrf_id = new_vrf_id;
+ RB_INSERT(vrf_id_head, &vrfs_by_id, vrf);
+
+ } else {
+
+ /*
+ * vrf_get is implied creation if it does not exist
+ */
+ vrf = vrf_get(new_vrf_id, name);
+ }
+ return vrf;
+}
+
/* Delete a VRF. This is called when the underlying VRF goes away, a
* pre-configured VRF is deleted or when shutting down (vrf_terminate()).
*/
@@ -628,12 +675,12 @@ int vrf_handler_create(struct vty *vty, const char *vrfname,
if (strlen(vrfname) > VRF_NAMSIZ) {
if (vty)
vty_out(vty,
- "%% VRF name %s invalid: length exceeds %d bytes\n",
+ "%% VRF name %s invalid: length exceeds %d bytes",
vrfname, VRF_NAMSIZ);
else
flog_warn(
EC_LIB_VRF_LENGTH,
- "%% VRF name %s invalid: length exceeds %d bytes\n",
+ "%% VRF name %s invalid: length exceeds %d bytes",
vrfname, VRF_NAMSIZ);
return CMD_WARNING_CONFIG_FAILED;
}
@@ -1064,6 +1111,7 @@ static int lib_vrf_create(struct nb_cb_create_args *args)
vrfp = vrf_get(VRF_UNKNOWN, vrfname);
+ vrf_set_user_cfged(vrfp);
nb_running_set_entry(args->dnode, vrfp);
return NB_OK;
@@ -1089,7 +1137,7 @@ static int lib_vrf_destroy(struct nb_cb_destroy_args *args)
vrfp = nb_running_unset_entry(args->dnode);
/* Clear configured flag and invoke delete. */
- UNSET_FLAG(vrfp->status, VRF_CONFIGURED);
+ vrf_reset_user_cfged(vrfp);
vrf_delete(vrfp);
break;
}
diff --git a/lib/vrf.h b/lib/vrf.h
index c636b9ea7e..32e6fb4289 100644
--- a/lib/vrf.h
+++ b/lib/vrf.h
@@ -114,6 +114,7 @@ extern struct vrf_name_head vrfs_by_name;
extern struct vrf *vrf_lookup_by_id(vrf_id_t);
extern struct vrf *vrf_lookup_by_name(const char *);
extern struct vrf *vrf_get(vrf_id_t, const char *);
+extern struct vrf *vrf_update(vrf_id_t new_vrf_id, const char *name);
extern const char *vrf_id_to_name(vrf_id_t vrf_id);
extern vrf_id_t vrf_name_to_id(const char *);
@@ -167,6 +168,20 @@ static inline void vrf_reset_user_cfged(struct vrf *vrf)
UNSET_FLAG(vrf->status, VRF_CONFIGURED);
}
+static inline uint32_t vrf_interface_count(struct vrf *vrf)
+{
+ uint32_t count = 0;
+ struct interface *ifp;
+
+ RB_FOREACH (ifp, if_name_head, &vrf->ifaces_by_name) {
+ /* skip the l3mdev */
+ if (strncmp(ifp->name, vrf->name, VRF_NAMSIZ) == 0)
+ continue;
+ count++;
+ }
+ return count;
+}
+
/*
* Utilities to obtain the user data
*/
diff --git a/lib/vty.c b/lib/vty.c
index 21b3d47b09..65f8d78a96 100644
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -73,7 +73,8 @@ enum event {
#endif /* VTYSH */
};
-static void vty_event(enum event, int, struct vty *);
+static void vty_event_serv(enum event event, int sock);
+static void vty_event(enum event, struct vty *);
/* Extern host structure from command.c */
extern struct host host;
@@ -396,16 +397,6 @@ static void vty_do_window_size(struct vty *vty)
vty_out(vty, "%s", cmd);
}
-#if 0 /* Currently not used. */
-/* Make don't use lflow vty interface. */
-static void
-vty_dont_lflow_ahead (struct vty *vty)
-{
- unsigned char cmd[] = { IAC, DONT, TELOPT_LFLOW, '\0' };
- vty_out (vty, "%s", cmd);
-}
-#endif /* 0 */
-
/* Authentication of vty */
static void vty_auth(struct vty *vty, char *buf)
{
@@ -1090,11 +1081,6 @@ static void vty_describe_command(struct vty *vty)
vector_free(varcomps);
}
-#if 0
- vty_out (vty, " %-*s %s\n", width
- desc->cmd[0] == '.' ? desc->cmd + 1 : desc->cmd,
- desc->str ? desc->str : "");
-#endif /* 0 */
}
if ((token = token_cr)) {
@@ -1299,6 +1285,7 @@ static int vty_execute(struct vty *vty)
#define VTY_NORMAL 0
#define VTY_PRE_ESCAPE 1
#define VTY_ESCAPE 2
+#define VTY_CR 3
/* Escape character command map. */
static void vty_escape_map(unsigned char c, struct vty *vty)
@@ -1340,14 +1327,13 @@ static int vty_read(struct thread *thread)
int nbytes;
unsigned char buf[VTY_READ_BUFSIZ];
- int vty_sock = THREAD_FD(thread);
struct vty *vty = THREAD_ARG(thread);
/* Read raw data from socket */
if ((nbytes = read(vty->fd, buf, VTY_READ_BUFSIZ)) <= 0) {
if (nbytes < 0) {
if (ERRNO_IO_RETRY(errno)) {
- vty_event(VTY_READ, vty_sock, vty);
+ vty_event(VTY_READ, vty);
return 0;
}
vty->monitor = 0; /* disable monitoring to avoid
@@ -1396,12 +1382,6 @@ static int vty_read(struct thread *thread)
case 'Q':
vty_buffer_reset(vty);
break;
-#if 0 /* More line does not work for "show ip bgp". */
- case '\n':
- case '\r':
- vty->status = VTY_MORELINE;
- break;
-#endif
default:
break;
}
@@ -1444,6 +1424,17 @@ static int vty_read(struct thread *thread)
continue;
}
+ if (vty->escape == VTY_CR) {
+ /* if we get CR+NL, the NL results in an extra empty
+ * prompt line being printed without this; just drop
+ * the NL if it immediately follows CR.
+ */
+ vty->escape = VTY_NORMAL;
+
+ if (buf[i] == '\n')
+ continue;
+ }
+
switch (buf[i]) {
case CONTROL('A'):
vty_beginning_of_line(vty);
@@ -1488,9 +1479,12 @@ static int vty_read(struct thread *thread)
case CONTROL('Z'):
vty_end_config(vty);
break;
- case '\n':
case '\r':
+ vty->escape = VTY_CR;
+ /* fallthru */
+ case '\n':
vty_out(vty, "\n");
+ buffer_flush_available(vty->obuf, vty->wfd);
vty_execute(vty);
break;
case '\t':
@@ -1521,8 +1515,8 @@ static int vty_read(struct thread *thread)
if (vty->status == VTY_CLOSE)
vty_close(vty);
else {
- vty_event(VTY_WRITE, vty->wfd, vty);
- vty_event(VTY_READ, vty_sock, vty);
+ vty_event(VTY_WRITE, vty);
+ vty_event(VTY_READ, vty);
}
return 0;
}
@@ -1532,7 +1526,6 @@ static int vty_flush(struct thread *thread)
{
int erase;
buffer_status_t flushrc;
- int vty_sock = THREAD_FD(thread);
struct vty *vty = THREAD_ARG(thread);
/* Tempolary disable read thread. */
@@ -1544,20 +1537,20 @@ static int vty_flush(struct thread *thread)
/* N.B. if width is 0, that means we don't know the window size. */
if ((vty->lines == 0) || (vty->width == 0) || (vty->height == 0))
- flushrc = buffer_flush_available(vty->obuf, vty_sock);
+ flushrc = buffer_flush_available(vty->obuf, vty->wfd);
else if (vty->status == VTY_MORELINE)
- flushrc = buffer_flush_window(vty->obuf, vty_sock, vty->width,
+ flushrc = buffer_flush_window(vty->obuf, vty->wfd, vty->width,
1, erase, 0);
else
flushrc = buffer_flush_window(
- vty->obuf, vty_sock, vty->width,
+ vty->obuf, vty->wfd, vty->width,
vty->lines >= 0 ? vty->lines : vty->height, erase, 0);
switch (flushrc) {
case BUFFER_ERROR:
vty->monitor =
0; /* disable monitoring to avoid infinite recursion */
- zlog_info("buffer_flush failed on vty client fd %d, closing",
- vty->fd);
+ zlog_info("buffer_flush failed on vty client fd %d/%d, closing",
+ vty->fd, vty->wfd);
buffer_reset(vty->lbuf);
buffer_reset(vty->obuf);
vty_close(vty);
@@ -1568,14 +1561,14 @@ static int vty_flush(struct thread *thread)
else {
vty->status = VTY_NORMAL;
if (vty->lines == 0)
- vty_event(VTY_READ, vty_sock, vty);
+ vty_event(VTY_READ, vty);
}
break;
case BUFFER_PENDING:
/* There is more data waiting to be written. */
vty->status = VTY_MORE;
if (vty->lines == 0)
- vty_event(VTY_WRITE, vty_sock, vty);
+ vty_event(VTY_WRITE, vty);
break;
}
@@ -1678,8 +1671,8 @@ static struct vty *vty_create(int vty_sock, union sockunion *su)
vty_prompt(vty);
/* Add read/write thread. */
- vty_event(VTY_WRITE, vty_sock, vty);
- vty_event(VTY_READ, vty_sock, vty);
+ vty_event(VTY_WRITE, vty);
+ vty_event(VTY_READ, vty);
return vty;
}
@@ -1735,7 +1728,6 @@ void vty_stdio_resume(void)
termios = stdio_orig_termios;
termios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR
| IGNCR | ICRNL | IXON);
- termios.c_oflag &= ~OPOST;
termios.c_lflag &= ~(ECHO | ECHONL | ICANON | IEXTEN);
termios.c_cflag &= ~(CSIZE | PARENB);
termios.c_cflag |= CS8;
@@ -1746,8 +1738,8 @@ void vty_stdio_resume(void)
vty_prompt(stdio_vty);
/* Add read/write thread. */
- vty_event(VTY_WRITE, 1, stdio_vty);
- vty_event(VTY_READ, 0, stdio_vty);
+ vty_event(VTY_WRITE, stdio_vty);
+ vty_event(VTY_READ, stdio_vty);
}
void vty_stdio_close(void)
@@ -1796,7 +1788,7 @@ static int vty_accept(struct thread *thread)
accept_sock = THREAD_FD(thread);
/* We continue hearing vty socket. */
- vty_event(VTY_SERV, accept_sock, NULL);
+ vty_event_serv(VTY_SERV, accept_sock);
memset(&su, 0, sizeof(union sockunion));
@@ -1826,7 +1818,7 @@ static int vty_accept(struct thread *thread)
close(vty_sock);
/* continue accepting connections */
- vty_event(VTY_SERV, accept_sock, NULL);
+ vty_event_serv(VTY_SERV, accept_sock);
return 0;
}
@@ -1842,7 +1834,7 @@ static int vty_accept(struct thread *thread)
close(vty_sock);
/* continue accepting connections */
- vty_event(VTY_SERV, accept_sock, NULL);
+ vty_event_serv(VTY_SERV, accept_sock);
return 0;
}
@@ -1915,7 +1907,7 @@ static void vty_serv_sock_addrinfo(const char *hostname, unsigned short port)
continue;
}
- vty_event(VTY_SERV, sock, NULL);
+ vty_event_serv(VTY_SERV, sock);
} while ((ainfo = ainfo->ai_next) != NULL);
freeaddrinfo(ainfo_save);
@@ -1993,7 +1985,7 @@ static void vty_serv_un(const char *path)
}
}
- vty_event(VTYSH_SERV, sock, NULL);
+ vty_event_serv(VTYSH_SERV, sock);
}
/* #define VTYSH_DEBUG 1 */
@@ -2008,7 +2000,7 @@ static int vtysh_accept(struct thread *thread)
accept_sock = THREAD_FD(thread);
- vty_event(VTYSH_SERV, accept_sock, NULL);
+ vty_event_serv(VTYSH_SERV, accept_sock);
memset(&client, 0, sizeof(struct sockaddr_un));
client_len = sizeof(struct sockaddr_un);
@@ -2042,7 +2034,7 @@ static int vtysh_accept(struct thread *thread)
vty->type = VTY_SHELL_SERV;
vty->node = VIEW_NODE;
- vty_event(VTYSH_READ, sock, vty);
+ vty_event(VTYSH_READ, vty);
return 0;
}
@@ -2051,7 +2043,7 @@ static int vtysh_flush(struct vty *vty)
{
switch (buffer_flush_available(vty->obuf, vty->wfd)) {
case BUFFER_PENDING:
- vty_event(VTYSH_WRITE, vty->wfd, vty);
+ vty_event(VTYSH_WRITE, vty);
break;
case BUFFER_ERROR:
vty->monitor =
@@ -2084,7 +2076,7 @@ static int vtysh_read(struct thread *thread)
if ((nbytes = read(sock, buf, VTY_READ_BUFSIZ)) <= 0) {
if (nbytes < 0) {
if (ERRNO_IO_RETRY(errno)) {
- vty_event(VTYSH_READ, sock, vty);
+ vty_event(VTYSH_READ, vty);
return 0;
}
vty->monitor = 0; /* disable monitoring to avoid
@@ -2150,7 +2142,7 @@ static int vtysh_read(struct thread *thread)
if (vty->status == VTY_CLOSE)
vty_close(vty);
else
- vty_event(VTYSH_READ, sock, vty);
+ vty_event(VTYSH_READ, vty);
return 0;
}
@@ -2657,33 +2649,44 @@ int vty_config_node_exit(struct vty *vty)
/* Master of the threads. */
static struct thread_master *vty_master;
-static void vty_event(enum event event, int sock, struct vty *vty)
+static void vty_event_serv(enum event event, int sock)
{
struct thread *vty_serv_thread = NULL;
switch (event) {
case VTY_SERV:
- vty_serv_thread = thread_add_read(vty_master, vty_accept, vty,
- sock, NULL);
+ vty_serv_thread = thread_add_read(vty_master, vty_accept,
+ NULL, sock, NULL);
vector_set_index(Vvty_serv_thread, sock, vty_serv_thread);
break;
#ifdef VTYSH
case VTYSH_SERV:
- vty_serv_thread = thread_add_read(vty_master, vtysh_accept, vty,
- sock, NULL);
+ vty_serv_thread = thread_add_read(vty_master, vtysh_accept,
+ NULL, sock, NULL);
vector_set_index(Vvty_serv_thread, sock, vty_serv_thread);
break;
+#endif /* VTYSH */
+ default:
+ assert(!"vty_event_serv() called incorrectly");
+ }
+}
+
+static void vty_event(enum event event, struct vty *vty)
+{
+ switch (event) {
+#ifdef VTYSH
case VTYSH_READ:
- thread_add_read(vty_master, vtysh_read, vty, sock,
+ thread_add_read(vty_master, vtysh_read, vty, vty->fd,
&vty->t_read);
break;
case VTYSH_WRITE:
- thread_add_write(vty_master, vtysh_write, vty, sock,
+ thread_add_write(vty_master, vtysh_write, vty, vty->wfd,
&vty->t_write);
break;
#endif /* VTYSH */
case VTY_READ:
- thread_add_read(vty_master, vty_read, vty, sock, &vty->t_read);
+ thread_add_read(vty_master, vty_read, vty, vty->fd,
+ &vty->t_read);
/* Time out treatment. */
if (vty->v_timeout) {
@@ -2693,7 +2696,7 @@ static void vty_event(enum event event, int sock, struct vty *vty)
}
break;
case VTY_WRITE:
- thread_add_write(vty_master, vty_flush, vty, sock,
+ thread_add_write(vty_master, vty_flush, vty, vty->wfd,
&vty->t_write);
break;
case VTY_TIMEOUT_RESET:
@@ -2702,6 +2705,8 @@ static void vty_event(enum event event, int sock, struct vty *vty)
thread_add_timer(vty_master, vty_timeout, vty,
vty->v_timeout, &vty->t_timeout);
break;
+ default:
+ assert(!"vty_event() called incorrectly");
}
}
@@ -2748,7 +2753,7 @@ static int exec_timeout(struct vty *vty, const char *min_str,
vty_timeout_val = timeout;
vty->v_timeout = timeout;
- vty_event(VTY_TIMEOUT_RESET, 0, vty);
+ vty_event(VTY_TIMEOUT_RESET, vty);
return CMD_SUCCESS;
diff --git a/lib/wheel.c b/lib/wheel.c
index f5e5cc52c3..5bdd6292f9 100644
--- a/lib/wheel.c
+++ b/lib/wheel.c
@@ -73,8 +73,7 @@ static int wheel_timer_thread(struct thread *t)
wheel = THREAD_ARG(t);
- thread_execute_name(wheel->master, wheel_timer_thread_helper,
- wheel, 0, wheel->name);
+ thread_execute(wheel->master, wheel_timer_thread_helper, wheel, 0);
return 0;
}
diff --git a/lib/workqueue.c b/lib/workqueue.c
index f8e4677220..8eabdf52e7 100644
--- a/lib/workqueue.c
+++ b/lib/workqueue.c
@@ -377,11 +377,6 @@ stats:
if (yielded)
wq->yields++;
-#if 0
- printf ("%s: cycles %d, new: best %d, worst %d\n",
- __func__, cycles, wq->cycles.best, wq->cycles.granularity);
-#endif
-
/* Is the queue done yet? If it is, call the completion callback. */
if (!work_queue_empty(wq)) {
if (ret == WQ_RETRY_LATER ||
diff --git a/lib/xref.c b/lib/xref.c
new file mode 100644
index 0000000000..40efe51363
--- /dev/null
+++ b/lib/xref.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2017-20 David Lamparter, for NetDEF, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <pthread.h>
+#include <signal.h>
+#include <inttypes.h>
+
+#include "xref.h"
+#include "vty.h"
+#include "jhash.h"
+#include "sha256.h"
+#include "memory.h"
+#include "hash.h"
+
+struct xref_block *xref_blocks;
+static struct xref_block **xref_block_last = &xref_blocks;
+
+static void base32(uint8_t **inpos, int *bitpos,
+ char *out, size_t n_chars)
+{
+ static const char base32ch[] = "0123456789ABCDEFGHJKMNPQRSTVWXYZ";
+
+ char *opos = out;
+ uint8_t *in = *inpos;
+ int bp = *bitpos;
+
+ while (opos < out + n_chars) {
+ uint32_t bits = in[0] | (in[1] << 8);
+
+ if (bp == -1)
+ bits |= 0x10;
+ else
+ bits >>= bp;
+
+ *opos++ = base32ch[bits & 0x1f];
+
+ bp += 5;
+ if (bp >= 8)
+ in++, bp -= 8;
+ }
+ *opos = '\0';
+ *inpos = in;
+ *bitpos = bp;
+}
+
+static void xref_add_one(const struct xref *xref)
+{
+ SHA256_CTX sha;
+ struct xrefdata *xrefdata;
+
+ const char *filename, *p, *q;
+ uint8_t hash[32], *h = hash;
+ uint32_t be_val;
+ int bitpos;
+
+ if (!xref || !xref->xrefdata)
+ return;
+
+ xrefdata = xref->xrefdata;
+ xrefdata->xref = xref;
+
+ if (!xrefdata->hashstr)
+ return;
+
+ /* as far as the unique ID is concerned, only use the last
+ * directory name + filename, e.g. "bgpd/bgp_route.c". This
+ * gives a little leeway in moving things and avoids IDs being
+ * screwed up by out of tree builds or absolute pathnames.
+ */
+ filename = xref->file;
+ p = strrchr(filename, '/');
+ if (p) {
+ q = memrchr(filename, '/', p - filename);
+ if (q)
+ filename = q + 1;
+ else
+ filename = p + 1;
+ }
+
+ SHA256_Init(&sha);
+ SHA256_Update(&sha, filename, strlen(filename));
+ SHA256_Update(&sha, xrefdata->hashstr,
+ strlen(xrefdata->hashstr));
+ be_val = htonl(xrefdata->hashu32[0]);
+ SHA256_Update(&sha, &be_val, sizeof(be_val));
+ be_val = htonl(xrefdata->hashu32[1]);
+ SHA256_Update(&sha, &be_val, sizeof(be_val));
+ SHA256_Final(hash, &sha);
+
+ bitpos = -1;
+ base32(&h, &bitpos, &xrefdata->uid[0], 5);
+ xrefdata->uid[5] = '-';
+ base32(&h, &bitpos, &xrefdata->uid[6], 5);
+}
+
+void xref_gcc_workaround(const struct xref *xref)
+{
+ xref_add_one(xref);
+}
+
+void xref_block_add(struct xref_block *block)
+{
+ const struct xref * const *xrefp;
+
+ *xref_block_last = block;
+ xref_block_last = &block->next;
+
+ for (xrefp = block->start; xrefp < block->stop; xrefp++)
+ xref_add_one(*xrefp);
+}
diff --git a/lib/xref.h b/lib/xref.h
new file mode 100644
index 0000000000..b3243fa058
--- /dev/null
+++ b/lib/xref.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2017-20 David Lamparter, for NetDEF, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _FRR_XREF_H
+#define _FRR_XREF_H
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <errno.h>
+#include "compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum xref_type {
+ XREFT_NONE = 0,
+
+ XREFT_THREADSCHED = 0x100,
+
+ XREFT_LOGMSG = 0x200,
+
+ XREFT_DEFUN = 0x300,
+ XREFT_INSTALL_ELEMENT = 0x301,
+};
+
+/* struct xref is the "const" part; struct xrefdata is the writable part. */
+struct xref;
+struct xrefdata;
+
+struct xref {
+ /* this may be NULL, depending on the type of the xref.
+ * if it is NULL, the xref has no unique ID and cannot be accessed
+ * through that mechanism.
+ */
+ struct xrefdata *xrefdata;
+
+ /* type isn't generally needed at runtime */
+ enum xref_type type;
+
+ /* code location */
+ int line;
+ const char *file;
+ const char *func;
+
+ /* -- 32 bytes (on 64bit) -- */
+
+ /* type-specific bits appended by embedding this struct */
+};
+
+struct xrefdata {
+ /* pointer back to the const part; this will be initialized at
+ * program startup by xref_block_add(). (Creating structs with
+ * cyclic pointers to each other is not easily possible for
+ * function-scoped static variables.)
+ *
+ * There is no xrefdata w/o xref, but there are xref w/o xrefdata.
+ */
+ const struct xref *xref;
+
+ /* base32(crockford) of unique ID. not all bytes are used, but
+ * let's pad to 16 for simplicity
+ */
+ char uid[16];
+
+ /* hash/uid input
+ * if hashstr is NULL, no UID is assigned/calculated. Use macro
+ * string concatenation if multiple values need to be fed in.
+ * (This is here to not make the UID calculation independent of
+ * xref type.)
+ */
+ const char *hashstr;
+ uint32_t hashu32[2];
+
+ /* -- 32 bytes (on 64bit) -- */
+};
+
+/* linker "magic" is used to create an array of pointers to struct xref.
+ * the result is a contiguous block of pointers, each pointing to an xref
+ * somewhere in the code. The linker gives us start and end pointers, we
+ * stuff those into the struct below and hook up a constructor to run at
+ * program startup with the struct passed.
+ *
+ * Placing the xrefs themselves into an array doesn't work because they'd
+ * need to be constant size, but we're embedding struct xref into other
+ * container structs with extra data. Also this means that external code
+ * (like the python xref dumper) can safely ignore extra data at the end of
+ * xrefs without needing to account for size in iterating the array.
+ *
+ * If you're curious, this is also how __attribute__((constructor)) (and
+ * destructor) are implemented - there are 2 arrays, ".init_array" and
+ * ".fini_array", containing function pointers. The magic turns out to be
+ * quite mundane, actually ;)
+ *
+ * The slightly tricky bit is that this is a per-object (i.e. per shared
+ * library & daemon) thing and we need a bit of help (in XREF_SETUP) to
+ * initialize correctly.
+ */
+
+struct xref_block {
+ struct xref_block *next;
+ const struct xref * const *start;
+ const struct xref * const *stop;
+};
+
+extern struct xref_block *xref_blocks;
+extern void xref_block_add(struct xref_block *block);
+extern void xref_gcc_workaround(const struct xref *xref);
+
+#ifndef HAVE_SECTION_SYMS
+/* we have a build system patch to use GNU ld on Solaris; if that doesn't
+ * work we end up on Solaris ld which doesn't support the section start/end
+ * symbols.
+ */
+#define XREF_SETUP() \
+ CPP_NOTICE("Missing linker support for section arrays. Solaris ld?")
+#else
+/* the actual symbols that the linker provides for us. Note these are
+ * _symbols_ referring to the actual section start/end, i.e. they are very
+ * much NOT _pointers_, rather the symbol *value* is the pointer. Declaring
+ * them as size-1 arrays is the "best" / "right" thing.
+ */
+extern const struct xref * const __start_xref_array[1] DSO_LOCAL;
+extern const struct xref * const __stop_xref_array[1] DSO_LOCAL;
+
+/* this macro is invoked once for each standalone DSO through
+ * FRR_MODULE_SETUP \
+ * }-> FRR_COREMOD_SETUP -> XREF_SETUP
+ * FRR_DAEMON_INFO /
+ */
+#define XREF_SETUP() \
+ static const struct xref _dummy_xref = { \
+ /* .xrefdata = */ NULL, \
+ /* .type = */ XREFT_NONE, \
+ /* .line = */ __LINE__, \
+ /* .file = */ __FILE__, \
+ /* .func = */ "dummy", \
+ }; \
+ static const struct xref * const _dummy_xref_p \
+ __attribute__((used, section("xref_array"))) \
+ = &_dummy_xref; \
+ static void __attribute__((used, _CONSTRUCTOR(1100))) \
+ _xref_init(void) { \
+ static struct xref_block _xref_block = { \
+ .start = __start_xref_array, \
+ .stop = __stop_xref_array, \
+ }; \
+ xref_block_add(&_xref_block); \
+ } \
+ asm(XREF_NOTE); \
+ /* end */
+
+/* the following blurb emits an ELF note indicating start and end of the xref
+ * array in the binary. This is technically the "correct" entry point for
+ * external tools reading xrefs out of an ELF shared library or executable.
+ *
+ * right now, the extraction tools use the section header for "xref_array"
+ * instead; however, section headers are technically not necessarily preserved
+ * for fully linked libraries or executables. (In practice they are only
+ * stripped by obfuscation tools.)
+ *
+ * conversely, for reading xrefs out of a single relocatable object file (e.g.
+ * bar.o), section headers are the right thing to look at since the note is
+ * only emitted for the final binary once.
+ *
+ * FRR itself does not need this note to operate correctly, so if you have
+ * some build issue with it just add -DFRR_XREF_NO_NOTE to your build flags
+ * to disable it.
+ */
+#ifdef FRR_XREF_NO_NOTE
+#define XREF_NOTE ""
+#else
+
+#if __SIZEOF_POINTER__ == 4
+#define _NOTE_2PTRSIZE "8"
+#define _NOTE_PTR ".long"
+#elif __SIZEOF_POINTER__ == 8
+#define _NOTE_2PTRSIZE "16"
+#define _NOTE_PTR ".quad"
+#else
+#error unsupported pointer size
+#endif
+
+#ifdef __arm__
+# define asmspecial "%"
+#else
+# define asmspecial "@"
+#endif
+
+#define XREF_NOTE \
+ "" "\n"\
+ " .type _frr_xref_note," asmspecial "object" "\n"\
+ " .pushsection .note.FRR,\"a\"," asmspecial "note" "\n"\
+ " .p2align 2" "\n"\
+ "_frr_xref_note:" "\n"\
+ " .long 9" "\n"\
+ " .long " _NOTE_2PTRSIZE "\n"\
+ " .ascii \"XREF\"" "\n"\
+ " .ascii \"FRRouting\\0\\0\\0\"" "\n"\
+ " " _NOTE_PTR " __start_xref_array-." "\n"\
+ " " _NOTE_PTR " __stop_xref_array-." "\n"\
+ " .size _frr_xref_note, .-_frr_xref_note" "\n"\
+ " .popsection" "\n"\
+ "" "\n"\
+ /* end */
+#endif
+
+#endif /* HAVE_SECTION_SYMS */
+
+/* emit the array entry / pointer to xref */
+#if defined(__clang__) || !defined(__cplusplus)
+#define XREF_LINK(dst) \
+ static const struct xref * const NAMECTR(xref_p_) \
+ __attribute__((used, section("xref_array"))) \
+ = &(dst) \
+ /* end */
+
+#else /* GCC && C++ */
+/* workaround for GCC bug 41091 (dated 2009), added in 2021...
+ *
+ * this breaks extraction of xrefs with xrelfo.py (because the xref_array
+ * entry will be missing), but provides full runtime functionality. To get
+ * the proper list of xrefs from C++ code, build with clang...
+ */
+struct _xref_p {
+ const struct xref * const ptr;
+
+ _xref_p(const struct xref *_ptr) : ptr(_ptr)
+ {
+ xref_gcc_workaround(_ptr);
+ }
+};
+
+#define XREF_LINK(dst) \
+ static const struct _xref_p __attribute__((used)) \
+ NAMECTR(xref_p_)(&(dst)) \
+ /* end */
+#endif
+
+/* initializer for a "struct xref" */
+#define XREF_INIT(type_, xrefdata_, func_) \
+ { \
+ /* .xrefdata = */ (xrefdata_), \
+ /* .type = */ (type_), \
+ /* .line = */ __LINE__, \
+ /* .file = */ __FILE__, \
+ /* .func = */ func_, \
+ } \
+ /* end */
+
+/* use with XREF_INIT when outside of a function, i.e. no __func__ */
+#define XREF_NO_FUNC "<global>"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FRR_XREF_H */
diff --git a/lib/zclient.c b/lib/zclient.c
index f16c94369b..20c285cf7f 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -996,17 +996,24 @@ done:
return ret;
}
-int zapi_nhg_encode(struct stream *s, int cmd, struct zapi_nhg *api_nhg)
+static int zapi_nhg_encode(struct stream *s, int cmd, struct zapi_nhg *api_nhg)
{
int i;
if (cmd != ZEBRA_NHG_DEL && cmd != ZEBRA_NHG_ADD) {
flog_err(EC_LIB_ZAPI_ENCODE,
- "%s: Specified zapi NHG command (%d) doesn't exist\n",
+ "%s: Specified zapi NHG command (%d) doesn't exist",
__func__, cmd);
return -1;
}
+ if (api_nhg->nexthop_num >= MULTIPATH_NUM ||
+ api_nhg->backup_nexthop_num >= MULTIPATH_NUM) {
+ flog_err(EC_LIB_ZAPI_ENCODE,
+ "%s: zapi NHG encode with invalid input", __func__);
+ return -1;
+ }
+
stream_reset(s);
zclient_create_header(s, cmd, VRF_DEFAULT);
@@ -1024,7 +1031,6 @@ int zapi_nhg_encode(struct stream *s, int cmd, struct zapi_nhg *api_nhg)
zapi_nexthop_encode(s, &api_nhg->nexthops[i], 0, 0);
/* Backup nexthops */
-
stream_putw(s, api_nhg->backup_nexthop_num);
for (i = 0; i < api_nhg->backup_nexthop_num; i++)
@@ -1059,7 +1065,7 @@ int zapi_route_encode(uint8_t cmd, struct stream *s, struct zapi_route *api)
if (api->type >= ZEBRA_ROUTE_MAX) {
flog_err(EC_LIB_ZAPI_ENCODE,
- "%s: Specified route type (%u) is not a legal value\n",
+ "%s: Specified route type (%u) is not a legal value",
__func__, api->type);
return -1;
}
@@ -1071,7 +1077,7 @@ int zapi_route_encode(uint8_t cmd, struct stream *s, struct zapi_route *api)
if (api->safi < SAFI_UNICAST || api->safi >= SAFI_MAX) {
flog_err(EC_LIB_ZAPI_ENCODE,
- "%s: Specified route SAFI (%u) is not a legal value\n",
+ "%s: Specified route SAFI (%u) is not a legal value",
__func__, api->safi);
return -1;
}
@@ -1286,7 +1292,7 @@ int zapi_route_decode(struct stream *s, struct zapi_route *api)
STREAM_GETC(s, api->type);
if (api->type >= ZEBRA_ROUTE_MAX) {
flog_err(EC_LIB_ZAPI_ENCODE,
- "%s: Specified route type: %d is not a legal value\n",
+ "%s: Specified route type: %d is not a legal value",
__func__, api->type);
return -1;
}
@@ -1297,7 +1303,7 @@ int zapi_route_decode(struct stream *s, struct zapi_route *api)
STREAM_GETC(s, api->safi);
if (api->safi < SAFI_UNICAST || api->safi >= SAFI_MAX) {
flog_err(EC_LIB_ZAPI_ENCODE,
- "%s: Specified route SAFI (%u) is not a legal value\n",
+ "%s: Specified route SAFI (%u) is not a legal value",
__func__, api->safi);
return -1;
}
@@ -3298,7 +3304,7 @@ static void zclient_capability_decode(ZAPI_CALLBACK_ARGS)
if (vrf_backend < 0 || vrf_configure_backend(vrf_backend)) {
flog_err(EC_LIB_ZAPI_ENCODE,
- "%s: Garbage VRF backend type: %d\n", __func__,
+ "%s: Garbage VRF backend type: %d", __func__,
vrf_backend);
goto stream_failure;
}
diff --git a/lib/zclient.h b/lib/zclient.h
index 57bad7c2e6..cf52ea91a0 100644
--- a/lib/zclient.h
+++ b/lib/zclient.h
@@ -1021,9 +1021,7 @@ bool zapi_ipset_notify_decode(struct stream *s,
uint32_t *unique,
enum zapi_ipset_notify_owner *note);
-
-extern int zapi_nhg_encode(struct stream *s, int cmd, struct zapi_nhg *api_nhg);
-extern int zapi_nhg_decode(struct stream *s, int cmd, struct zapi_nhg *api_nhg);
+/* Nexthop-group message apis */
extern enum zclient_send_status
zclient_nhg_send(struct zclient *zclient, int cmd, struct zapi_nhg *api_nhg);
diff --git a/lib/zlog.c b/lib/zlog.c
index e77feec5f2..51509e24f4 100644
--- a/lib/zlog.c
+++ b/lib/zlog.c
@@ -94,6 +94,7 @@ struct zlog_msg {
const char *fmt;
va_list args;
+ const struct xref_logmsg *xref;
char *stackbuf;
size_t stackbufsz;
@@ -349,12 +350,14 @@ void zlog_tls_buffer_flush(void)
}
-static void vzlog_notls(int prio, const char *fmt, va_list ap)
+static void vzlog_notls(const struct xref_logmsg *xref, int prio,
+ const char *fmt, va_list ap)
{
struct zlog_target *zt;
struct zlog_msg stackmsg = {
.prio = prio & LOG_PRIMASK,
.fmt = fmt,
+ .xref = xref,
}, *msg = &stackmsg;
char stackbuf[512];
@@ -379,8 +382,8 @@ static void vzlog_notls(int prio, const char *fmt, va_list ap)
XFREE(MTYPE_LOG_MESSAGE, msg->text);
}
-static void vzlog_tls(struct zlog_tls *zlog_tls, int prio,
- const char *fmt, va_list ap)
+static void vzlog_tls(struct zlog_tls *zlog_tls, const struct xref_logmsg *xref,
+ int prio, const char *fmt, va_list ap)
{
struct zlog_target *zt;
struct zlog_msg *msg;
@@ -413,6 +416,7 @@ static void vzlog_tls(struct zlog_tls *zlog_tls, int prio,
msg->stackbufsz = TLS_LOG_BUF_SIZE - zlog_tls->bufpos - 1;
msg->fmt = fmt;
msg->prio = prio & LOG_PRIMASK;
+ msg->xref = xref;
if (msg->prio < LOG_INFO)
immediate = true;
@@ -447,7 +451,8 @@ static void vzlog_tls(struct zlog_tls *zlog_tls, int prio,
XFREE(MTYPE_LOG_MESSAGE, msg->text);
}
-void vzlog(int prio, const char *fmt, va_list ap)
+void vzlogx(const struct xref_logmsg *xref, int prio,
+ const char *fmt, va_list ap)
{
struct zlog_tls *zlog_tls = zlog_tls_get();
@@ -480,9 +485,9 @@ void vzlog(int prio, const char *fmt, va_list ap)
#endif
if (zlog_tls)
- vzlog_tls(zlog_tls, prio, fmt, ap);
+ vzlog_tls(zlog_tls, xref, prio, fmt, ap);
else
- vzlog_notls(prio, fmt, ap);
+ vzlog_notls(xref, prio, fmt, ap);
}
void zlog_sigsafe(const char *text, size_t len)
@@ -516,6 +521,11 @@ int zlog_msg_prio(struct zlog_msg *msg)
return msg->prio;
}
+const struct xref_logmsg *zlog_msg_xref(struct zlog_msg *msg)
+{
+ return msg->xref;
+}
+
const char *zlog_msg_text(struct zlog_msg *msg, size_t *textlen)
{
if (!msg->text) {
diff --git a/lib/zlog.h b/lib/zlog.h
index 1c5013746b..bdf59fa68e 100644
--- a/lib/zlog.h
+++ b/lib/zlog.h
@@ -38,6 +38,20 @@ extern char zlog_prefix[];
extern size_t zlog_prefixsz;
extern int zlog_tmpdirfd;
+struct xref_logmsg {
+ struct xref xref;
+
+ const char *fmtstring;
+ uint32_t priority;
+ uint32_t ec;
+};
+
+struct xrefdata_logmsg {
+ struct xrefdata xrefdata;
+
+ /* nothing more here right now */
+};
+
/* These functions are set up to write to stdout/stderr without explicit
* initialization and/or before config load. There is no need to call e.g.
* fprintf(stderr, ...) just because it's "too early" at startup. Depending
@@ -45,7 +59,9 @@ extern int zlog_tmpdirfd;
* determine wether something is a log message or something else.
*/
-extern void vzlog(int prio, const char *fmt, va_list ap);
+extern void vzlogx(const struct xref_logmsg *xref, int prio,
+ const char *fmt, va_list ap);
+#define vzlog(prio, ...) vzlogx(NULL, prio, __VA_ARGS__)
PRINTFRR(2, 3)
static inline void zlog(int prio, const char *fmt, ...)
@@ -57,11 +73,61 @@ static inline void zlog(int prio, const char *fmt, ...)
va_end(ap);
}
-#define zlog_err(...) zlog(LOG_ERR, __VA_ARGS__)
-#define zlog_warn(...) zlog(LOG_WARNING, __VA_ARGS__)
-#define zlog_info(...) zlog(LOG_INFO, __VA_ARGS__)
-#define zlog_notice(...) zlog(LOG_NOTICE, __VA_ARGS__)
-#define zlog_debug(...) zlog(LOG_DEBUG, __VA_ARGS__)
+PRINTFRR(2, 3)
+static inline void zlog_ref(const struct xref_logmsg *xref,
+ const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vzlogx(xref, xref->priority, fmt, ap);
+ va_end(ap);
+}
+
+#define _zlog_ref(prio, msg, ...) do { \
+ static struct xrefdata _xrefdata = { \
+ .hashstr = (msg), \
+ .hashu32 = { (prio), 0 }, \
+ }; \
+ static const struct xref_logmsg _xref __attribute__((used)) = {\
+ .xref = XREF_INIT(XREFT_LOGMSG, &_xrefdata, __func__), \
+ .fmtstring = (msg), \
+ .priority = (prio), \
+ }; \
+ XREF_LINK(_xref.xref); \
+ zlog_ref(&_xref, (msg), ## __VA_ARGS__); \
+ } while (0)
+
+#define zlog_err(...) _zlog_ref(LOG_ERR, __VA_ARGS__)
+#define zlog_warn(...) _zlog_ref(LOG_WARNING, __VA_ARGS__)
+#define zlog_info(...) _zlog_ref(LOG_INFO, __VA_ARGS__)
+#define zlog_notice(...) _zlog_ref(LOG_NOTICE, __VA_ARGS__)
+#define zlog_debug(...) _zlog_ref(LOG_DEBUG, __VA_ARGS__)
+
+#define _zlog_ecref(ec_, prio, msg, ...) do { \
+ static struct xrefdata _xrefdata = { \
+ .hashstr = (msg), \
+ .hashu32 = { (prio), (ec_) }, \
+ }; \
+ static const struct xref_logmsg _xref __attribute__((used)) = {\
+ .xref = XREF_INIT(XREFT_LOGMSG, &_xrefdata, __func__), \
+ .fmtstring = (msg), \
+ .priority = (prio), \
+ .ec = (ec_), \
+ }; \
+ XREF_LINK(_xref.xref); \
+ zlog_ref(&_xref, "[EC %u] " msg, ec_, ## __VA_ARGS__); \
+ } while (0)
+
+#define flog_err(ferr_id, format, ...) \
+ _zlog_ecref(ferr_id, LOG_ERR, format, ## __VA_ARGS__)
+#define flog_warn(ferr_id, format, ...) \
+ _zlog_ecref(ferr_id, LOG_WARNING, format, ## __VA_ARGS__)
+
+#define flog_err_sys(ferr_id, format, ...) \
+ flog_err(ferr_id, format, ##__VA_ARGS__)
+#define flog(priority, ferr_id, format, ...) \
+ zlog(priority, "[EC %u] " format, ferr_id, ##__VA_ARGS__)
extern void zlog_sigsafe(const char *text, size_t len);
@@ -83,6 +149,7 @@ extern void zlog_sigsafe(const char *text, size_t len);
struct zlog_msg;
extern int zlog_msg_prio(struct zlog_msg *msg);
+extern const struct xref_logmsg *zlog_msg_xref(struct zlog_msg *msg);
/* pass NULL as textlen if you don't need it. */
extern const char *zlog_msg_text(struct zlog_msg *msg, size_t *textlen);
diff --git a/nhrpd/nhrp_main.c b/nhrpd/nhrp_main.c
index 9fc13761c8..49a4900bf8 100644
--- a/nhrpd/nhrp_main.c
+++ b/nhrpd/nhrp_main.c
@@ -144,8 +144,13 @@ int main(int argc, char **argv)
nhrp_interface_init();
resolver_init(master);
- /* Run with elevated capabilities, as for all netlink activity
- * we need privileges anyway. */
+ /*
+ * Run with elevated capabilities, as for all netlink activity
+ * we need privileges anyway.
+ * The assert is for clang SA code where it does
+ * not see the change function being set in lib
+ */
+ assert(nhrpd_privs.change);
nhrpd_privs.change(ZPRIVS_RAISE);
netlink_init();
diff --git a/nhrpd/nhrp_route.c b/nhrpd/nhrp_route.c
index ce2b1fe2ff..334f468c18 100644
--- a/nhrpd/nhrp_route.c
+++ b/nhrpd/nhrp_route.c
@@ -98,6 +98,7 @@ void nhrp_route_announce(int add, enum nhrp_cache_type type,
{
struct zapi_route api;
struct zapi_nexthop *api_nh;
+ union sockunion *nexthop_ref = (union sockunion *)nexthop;
if (zclient->sock < 0)
return;
@@ -133,8 +134,14 @@ void nhrp_route_announce(int add, enum nhrp_cache_type type,
switch (api.prefix.family) {
case AF_INET:
- if (nexthop) {
- api_nh->gate.ipv4 = nexthop->sin.sin_addr;
+ if (api.prefix.prefixlen == IPV4_MAX_BITLEN &&
+ nexthop_ref &&
+ memcmp(&nexthop_ref->sin.sin_addr, &api.prefix.u.prefix4,
+ sizeof(struct in_addr)) == 0) {
+ nexthop_ref = NULL;
+ }
+ if (nexthop_ref) {
+ api_nh->gate.ipv4 = nexthop_ref->sin.sin_addr;
api_nh->type = NEXTHOP_TYPE_IPV4;
}
if (ifp) {
@@ -146,8 +153,14 @@ void nhrp_route_announce(int add, enum nhrp_cache_type type,
}
break;
case AF_INET6:
- if (nexthop) {
- api_nh->gate.ipv6 = nexthop->sin6.sin6_addr;
+ if (api.prefix.prefixlen == IPV6_MAX_BITLEN &&
+ nexthop_ref &&
+ memcmp(&nexthop_ref->sin6.sin6_addr, &api.prefix.u.prefix6,
+ sizeof(struct in6_addr)) == 0) {
+ nexthop_ref = NULL;
+ }
+ if (nexthop_ref) {
+ api_nh->gate.ipv6 = nexthop_ref->sin6.sin6_addr;
api_nh->type = NEXTHOP_TYPE_IPV6;
}
if (ifp) {
@@ -170,8 +183,9 @@ void nhrp_route_announce(int add, enum nhrp_cache_type type,
zlog_debug(
"Zebra send: route %s %pFX nexthop %s metric %u count %d dev %s",
add ? "add" : "del", &api.prefix,
- nexthop ? inet_ntop(api.prefix.family, &api_nh->gate,
- buf, sizeof(buf))
+ nexthop_ref ? inet_ntop(api.prefix.family,
+ &api_nh->gate,
+ buf, sizeof(buf))
: "<onlink>",
api.metric, api.nexthop_num, ifp ? ifp->name : "none");
}
diff --git a/nhrpd/nhrp_shortcut.c b/nhrpd/nhrp_shortcut.c
index 6ad0c9ea03..fbb883185a 100644
--- a/nhrpd/nhrp_shortcut.c
+++ b/nhrpd/nhrp_shortcut.c
@@ -51,18 +51,26 @@ static int nhrp_shortcut_do_expire(struct thread *t)
static void nhrp_shortcut_cache_notify(struct notifier_block *n,
unsigned long cmd)
{
+ char buf2[PREFIX_STRLEN];
+
struct nhrp_shortcut *s =
container_of(n, struct nhrp_shortcut, cache_notifier);
+ struct nhrp_cache *c = s->cache;
+ if (c)
+ sockunion2str(&c->remote_addr, buf2, sizeof(buf2));
+ else
+ snprintf(buf2, sizeof(buf2), "(unspec)");
switch (cmd) {
case NOTIFY_CACHE_UP:
if (!s->route_installed) {
debugf(NHRP_DEBUG_ROUTE,
- "Shortcut: route install %pFX nh (unspec) dev %s",
- s->p, s->cache->ifp->name);
+ "Shortcut: route install %pFX nh %s dev %s",
+ s->p, buf2, c && c->ifp ?
+ c->ifp->name : "<unk>");
- nhrp_route_announce(1, s->type, s->p, s->cache->ifp,
- &s->cache->remote_addr, 0);
+ nhrp_route_announce(1, s->type, s->p, c ? c->ifp : NULL,
+ c ? &c->remote_addr : NULL, 0);
s->route_installed = 1;
}
break;
diff --git a/ospf6d/ospf6_abr.c b/ospf6d/ospf6_abr.c
index 8cfdf2642c..abcdb40547 100644
--- a/ospf6d/ospf6_abr.c
+++ b/ospf6d/ospf6_abr.c
@@ -160,35 +160,22 @@ int ospf6_abr_originate_summary_to_area(struct ospf6_route *route,
&& route->type != OSPF6_DEST_TYPE_RANGE
&& ((route->type != OSPF6_DEST_TYPE_ROUTER)
|| !CHECK_FLAG(route->path.router_bits, OSPF6_ROUTER_BIT_E))) {
-#if 0
- zlog_debug(
- "Route type is none of network, range nor ASBR, ignore");
-#endif
return 0;
}
/* AS External routes are never considered */
if (route->path.type == OSPF6_PATH_TYPE_EXTERNAL1
|| route->path.type == OSPF6_PATH_TYPE_EXTERNAL2) {
-#if 0
- zlog_debug("Path type is external, skip");
-#endif
return 0;
}
/* do not generate if the path's area is the same as target area */
if (route->path.area_id == area->area_id) {
-#if 0
- zlog_debug("The route is in the area itself, ignore");
-#endif
return 0;
}
/* do not generate if the nexthops belongs to the target area */
if (ospf6_abr_nexthops_belong_to_area(route, area)) {
-#if 0
- zlog_debug("The route's nexthop is in the same area, ignore");
-#endif
return 0;
}
@@ -770,6 +757,10 @@ void ospf6_abr_old_path_update(struct ospf6_route *old_route,
void ospf6_abr_old_route_remove(struct ospf6_lsa *lsa, struct ospf6_route *old,
struct ospf6_route_table *table)
{
+ if (IS_OSPF6_DEBUG_ABR)
+ zlog_debug("%s: route %pFX, paths %d", __func__, &old->prefix,
+ listcount(old->paths));
+
if (listcount(old->paths) > 1) {
struct listnode *anode, *anext, *nnode, *rnode, *rnext;
struct ospf6_path *o_path;
@@ -778,13 +769,15 @@ void ospf6_abr_old_route_remove(struct ospf6_lsa *lsa, struct ospf6_route *old,
for (ALL_LIST_ELEMENTS(old->paths, anode, anext, o_path)) {
if (o_path->origin.adv_router != lsa->header->adv_router
- && o_path->origin.id != lsa->header->id)
+ || o_path->origin.id != lsa->header->id)
continue;
for (ALL_LIST_ELEMENTS_RO(o_path->nh_list, nnode, nh)) {
for (ALL_LIST_ELEMENTS(old->nh_list,
rnode, rnext, rnh)) {
if (!ospf6_nexthop_is_same(rnh, nh))
continue;
+ if (IS_OSPF6_DEBUG_ABR)
+ zlog_debug("deleted nexthop");
listnode_delete(old->nh_list, rnh);
ospf6_nexthop_delete(rnh);
}
@@ -847,14 +840,16 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
bool old_entry_updated = false;
struct ospf6_path *path, *o_path, *ecmp_path;
struct listnode *anode;
+ bool add_route = false;
memset(&prefix, 0, sizeof(prefix));
if (lsa->header->type == htons(OSPF6_LSTYPE_INTER_PREFIX)) {
if (IS_OSPF6_DEBUG_EXAMIN(INTER_PREFIX)) {
is_debug++;
- zlog_debug("%s: Examin %s in area %s", __func__,
- lsa->name, oa->name);
+ zlog_debug("%s: LSA %s age %d in area %s", __func__,
+ lsa->name, ospf6_lsa_age_current(lsa),
+ oa->name);
}
prefix_lsa =
@@ -873,8 +868,9 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
} else if (lsa->header->type == htons(OSPF6_LSTYPE_INTER_ROUTER)) {
if (IS_OSPF6_DEBUG_EXAMIN(INTER_ROUTER)) {
is_debug++;
- zlog_debug("%s: Examin %s in area %s", __func__,
- lsa->name, oa->name);
+ zlog_debug("%s: LSA %s age %d in area %s", __func__,
+ lsa->name, ospf6_lsa_age_current(lsa),
+ oa->name);
}
router_lsa =
@@ -898,8 +894,12 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
/* Find existing route */
route = ospf6_route_lookup(&prefix, table);
- if (route)
+ if (route) {
ospf6_route_lock(route);
+ if (is_debug)
+ zlog_debug("%s: route %pFX, paths %d", __func__,
+ &prefix, listcount(route->paths));
+ }
while (route && ospf6_route_is_prefix(&prefix, route)) {
if (route->path.area_id == oa->area_id
&& route->path.origin.type == lsa->header->type
@@ -952,6 +952,7 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
return;
}
+
/* (2) if the LSA is self-originated, ignore */
if (lsa->header->adv_router == oa->ospf6->router_id) {
if (is_debug)
@@ -1026,8 +1027,8 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
|| !CHECK_FLAG(abr_entry->path.router_bits, OSPF6_ROUTER_BIT_B)) {
if (is_debug)
zlog_debug(
- "%s: ABR router entry does not exist, ignore",
- __func__);
+ "%s: ABR router entry %pFX does not exist, ignore",
+ __func__, &abr_prefix);
if (old) {
if (old->type == OSPF6_DEST_TYPE_ROUTER &&
oa->intra_brouter_calc) {
@@ -1040,7 +1041,7 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
zlog_debug(
"%s: remove old entry: %s %p ",
__func__, buf, (void *)old);
- ospf6_route_remove(old, table);
+ ospf6_abr_old_route_remove(lsa, old, table);
}
}
return;
@@ -1104,7 +1105,11 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
are identical.
*/
old = ospf6_route_lookup(&prefix, table);
-
+ if (old) {
+ if (is_debug)
+ zlog_debug("%s: found old route %pFX, paths %d",
+ __func__, &prefix, listcount(old->paths));
+ }
for (old_route = old; old_route; old_route = old_route->next) {
if (!ospf6_route_is_same(old_route, route) ||
(old_route->type != route->type) ||
@@ -1186,7 +1191,7 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
"%s: Update route: %s %p old cost %u new cost %u nh %u",
__func__, buf, (void *)old_route,
old_route->path.cost, route->path.cost,
- listcount(route->nh_list));
+ listcount(old_route->nh_list));
/* For Inter-Prefix route: Update RIB/FIB,
* For Inter-Router trigger summary update
@@ -1199,10 +1204,19 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
break;
}
+ /* If the old entry is not updated and old entry not found or old entry
+ * does not match with the new entry then add the new route
+ */
if (old_entry_updated == false) {
+ if ((old == NULL) || (old->type != route->type)
+ || (old->path.type != route->path.type))
+ add_route = true;
+ }
+
+ if (add_route) {
if (is_debug) {
zlog_debug(
- "%s: Install route: %s cost %u nh %u adv_router %pI4",
+ "%s: Install new route: %s cost %u nh %u adv_router %pI4",
__func__, buf, route->path.cost,
listcount(route->nh_list),
&route->path.origin.adv_router);
@@ -1296,7 +1310,9 @@ static char *ospf6_inter_area_prefix_lsa_get_prefix_str(struct ospf6_lsa *lsa,
}
static int ospf6_inter_area_prefix_lsa_show(struct vty *vty,
- struct ospf6_lsa *lsa)
+ struct ospf6_lsa *lsa,
+ json_object *json_obj,
+ bool use_json)
{
struct ospf6_inter_prefix_lsa *prefix_lsa;
char buf[INET6_ADDRSTRLEN];
@@ -1304,16 +1320,29 @@ static int ospf6_inter_area_prefix_lsa_show(struct vty *vty,
prefix_lsa = (struct ospf6_inter_prefix_lsa *)OSPF6_LSA_HEADER_END(
lsa->header);
- vty_out(vty, " Metric: %lu\n",
- (unsigned long)OSPF6_ABR_SUMMARY_METRIC(prefix_lsa));
+ if (use_json) {
+ json_object_int_add(
+ json_obj, "metric",
+ (unsigned long)OSPF6_ABR_SUMMARY_METRIC(prefix_lsa));
+ ospf6_prefix_options_printbuf(prefix_lsa->prefix.prefix_options,
+ buf, sizeof(buf));
+ json_object_string_add(json_obj, "prefixOptions", buf);
+ json_object_string_add(
+ json_obj, "prefix",
+ ospf6_inter_area_prefix_lsa_get_prefix_str(
+ lsa, buf, sizeof(buf), 0));
+ } else {
+ vty_out(vty, " Metric: %lu\n",
+ (unsigned long)OSPF6_ABR_SUMMARY_METRIC(prefix_lsa));
- ospf6_prefix_options_printbuf(prefix_lsa->prefix.prefix_options, buf,
- sizeof(buf));
- vty_out(vty, " Prefix Options: %s\n", buf);
+ ospf6_prefix_options_printbuf(prefix_lsa->prefix.prefix_options,
+ buf, sizeof(buf));
+ vty_out(vty, " Prefix Options: %s\n", buf);
- vty_out(vty, " Prefix: %s\n",
- ospf6_inter_area_prefix_lsa_get_prefix_str(lsa, buf,
- sizeof(buf), 0));
+ vty_out(vty, " Prefix: %s\n",
+ ospf6_inter_area_prefix_lsa_get_prefix_str(
+ lsa, buf, sizeof(buf), 0));
+ }
return 0;
}
@@ -1338,7 +1367,9 @@ static char *ospf6_inter_area_router_lsa_get_prefix_str(struct ospf6_lsa *lsa,
}
static int ospf6_inter_area_router_lsa_show(struct vty *vty,
- struct ospf6_lsa *lsa)
+ struct ospf6_lsa *lsa,
+ json_object *json_obj,
+ bool use_json)
{
struct ospf6_inter_router_lsa *router_lsa;
char buf[64];
@@ -1347,12 +1378,22 @@ static int ospf6_inter_area_router_lsa_show(struct vty *vty,
lsa->header);
ospf6_options_printbuf(router_lsa->options, buf, sizeof(buf));
- vty_out(vty, " Options: %s\n", buf);
- vty_out(vty, " Metric: %lu\n",
- (unsigned long)OSPF6_ABR_SUMMARY_METRIC(router_lsa));
+ if (use_json) {
+ json_object_string_add(json_obj, "options", buf);
+ json_object_int_add(
+ json_obj, "metric",
+ (unsigned long)OSPF6_ABR_SUMMARY_METRIC(router_lsa));
+ } else {
+ vty_out(vty, " Options: %s\n", buf);
+ vty_out(vty, " Metric: %lu\n",
+ (unsigned long)OSPF6_ABR_SUMMARY_METRIC(router_lsa));
+ }
inet_ntop(AF_INET, &router_lsa->router_id, buf, sizeof(buf));
- vty_out(vty, " Destination Router ID: %s\n", buf);
+ if (use_json)
+ json_object_string_add(json_obj, "destinationRouterId", buf);
+ else
+ vty_out(vty, " Destination Router ID: %s\n", buf);
return 0;
}
diff --git a/ospf6d/ospf6_asbr.c b/ospf6d/ospf6_asbr.c
index 0e419cbff6..3449f48267 100644
--- a/ospf6d/ospf6_asbr.c
+++ b/ospf6d/ospf6_asbr.c
@@ -210,7 +210,7 @@ void ospf6_asbr_update_route_ecmp_path(struct ospf6_route *old,
struct ospf6_route *route,
struct ospf6 *ospf6)
{
- struct ospf6_route *old_route;
+ struct ospf6_route *old_route, *next_route;
struct ospf6_path *ecmp_path, *o_path = NULL;
struct listnode *anode, *anext;
struct listnode *nnode, *rnode, *rnext;
@@ -220,9 +220,11 @@ void ospf6_asbr_update_route_ecmp_path(struct ospf6_route *old,
/* check for old entry match with new route origin,
* delete old entry.
*/
- for (old_route = old; old_route; old_route = old_route->next) {
+ for (old_route = old; old_route; old_route = next_route) {
bool route_updated = false;
+ next_route = old_route->next;
+
if (!ospf6_route_is_same(old_route, route)
|| (old_route->path.type != route->path.type))
continue;
@@ -315,6 +317,8 @@ void ospf6_asbr_update_route_ecmp_path(struct ospf6_route *old,
old_route->path.cost,
route->path.cost);
}
+ if (old == old_route)
+ old = next_route;
ospf6_route_remove(old_route,
ospf6->route_table);
}
@@ -1439,8 +1443,7 @@ static void ospf6_redistribute_show_config(struct vty *vty, struct ospf6 *ospf6,
struct ospf6_redist *red;
total = 0;
- for (type = 0; type < ZEBRA_ROUTE_MAX; type++)
- nroute[type] = 0;
+ memset(nroute, 0, sizeof(nroute));
for (route = ospf6_route_head(ospf6->external_table); route;
route = ospf6_route_next(route)) {
info = route->route_option;
@@ -1448,12 +1451,11 @@ static void ospf6_redistribute_show_config(struct vty *vty, struct ospf6 *ospf6,
total++;
}
- if (use_json)
- json_route = json_object_new_object();
- else
+ if (!use_json)
vty_out(vty, "Redistributing External Routes from:\n");
for (type = 0; type < ZEBRA_ROUTE_MAX; type++) {
+
red = ospf6_redist_lookup(ospf6, type, 0);
if (!red)
@@ -1462,6 +1464,7 @@ static void ospf6_redistribute_show_config(struct vty *vty, struct ospf6 *ospf6,
continue;
if (use_json) {
+ json_route = json_object_new_object();
json_object_string_add(json_route, "routeType",
ZROUTE_NAME(type));
json_object_int_add(json_route, "numberOfRoutes",
@@ -1890,7 +1893,8 @@ static char *ospf6_as_external_lsa_get_prefix_str(struct ospf6_lsa *lsa,
return (buf);
}
-static int ospf6_as_external_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
+static int ospf6_as_external_lsa_show(struct vty *vty, struct ospf6_lsa *lsa,
+ json_object *json_obj, bool use_json)
{
struct ospf6_as_external_lsa *external;
char buf[64];
@@ -1908,31 +1912,65 @@ static int ospf6_as_external_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
(CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_T) ? 'T'
: '-'));
- vty_out(vty, " Bits: %s\n", buf);
- vty_out(vty, " Metric: %5lu\n",
- (unsigned long)OSPF6_ASBR_METRIC(external));
-
- ospf6_prefix_options_printbuf(external->prefix.prefix_options, buf,
- sizeof(buf));
- vty_out(vty, " Prefix Options: %s\n", buf);
+ if (use_json) {
+ json_object_string_add(json_obj, "bits", buf);
+ json_object_int_add(json_obj, "metric",
+ (unsigned long)OSPF6_ASBR_METRIC(external));
+ ospf6_prefix_options_printbuf(external->prefix.prefix_options,
+ buf, sizeof(buf));
+ json_object_string_add(json_obj, "prefixOptions", buf);
+ json_object_int_add(
+ json_obj, "referenceLsType",
+ ntohs(external->prefix.prefix_refer_lstype));
+ json_object_string_add(json_obj, "prefix",
+ ospf6_as_external_lsa_get_prefix_str(
+ lsa, buf, sizeof(buf), 0));
+
+ /* Forwarding-Address */
+ json_object_boolean_add(
+ json_obj, "forwardingAddressPresent",
+ CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_F));
+ if (CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_F))
+ json_object_string_add(
+ json_obj, "forwardingAddress",
+ ospf6_as_external_lsa_get_prefix_str(
+ lsa, buf, sizeof(buf), 1));
+
+ /* Tag */
+ json_object_boolean_add(
+ json_obj, "tagPresent",
+ CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_T));
+ if (CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_T))
+ json_object_int_add(json_obj, "tag",
+ ospf6_as_external_lsa_get_tag(lsa));
+ } else {
+ vty_out(vty, " Bits: %s\n", buf);
+ vty_out(vty, " Metric: %5lu\n",
+ (unsigned long)OSPF6_ASBR_METRIC(external));
- vty_out(vty, " Referenced LSType: %d\n",
- ntohs(external->prefix.prefix_refer_lstype));
+ ospf6_prefix_options_printbuf(external->prefix.prefix_options,
+ buf, sizeof(buf));
+ vty_out(vty, " Prefix Options: %s\n", buf);
- vty_out(vty, " Prefix: %s\n",
- ospf6_as_external_lsa_get_prefix_str(lsa, buf, sizeof(buf), 0));
+ vty_out(vty, " Referenced LSType: %d\n",
+ ntohs(external->prefix.prefix_refer_lstype));
- /* Forwarding-Address */
- if (CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_F)) {
- vty_out(vty, " Forwarding-Address: %s\n",
+ vty_out(vty, " Prefix: %s\n",
ospf6_as_external_lsa_get_prefix_str(lsa, buf,
- sizeof(buf), 1));
- }
+ sizeof(buf), 0));
- /* Tag */
- if (CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_T)) {
- vty_out(vty, " Tag: %" ROUTE_TAG_PRI "\n",
- ospf6_as_external_lsa_get_tag(lsa));
+ /* Forwarding-Address */
+ if (CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_F)) {
+ vty_out(vty, " Forwarding-Address: %s\n",
+ ospf6_as_external_lsa_get_prefix_str(
+ lsa, buf, sizeof(buf), 1));
+ }
+
+ /* Tag */
+ if (CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_T)) {
+ vty_out(vty, " Tag: %" ROUTE_TAG_PRI "\n",
+ ospf6_as_external_lsa_get_tag(lsa));
+ }
}
return 0;
diff --git a/ospf6d/ospf6_flood.c b/ospf6d/ospf6_flood.c
index 0662cfd683..2d896546fa 100644
--- a/ospf6d/ospf6_flood.c
+++ b/ospf6d/ospf6_flood.c
@@ -452,12 +452,6 @@ void ospf6_flood_area(struct ospf6_neighbor *from, struct ospf6_lsa *lsa,
&& oi != OSPF6_INTERFACE(lsa->lsdb->data))
continue;
-#if 0
- if (OSPF6_LSA_SCOPE (lsa->header->type) == OSPF6_SCOPE_AS &&
- ospf6_is_interface_virtual_link (oi))
- continue;
-#endif /*0*/
-
ospf6_flood_interface(from, lsa, oi);
}
}
@@ -527,12 +521,6 @@ static void ospf6_flood_clear_area(struct ospf6_lsa *lsa, struct ospf6_area *oa)
&& oi != OSPF6_INTERFACE(lsa->lsdb->data))
continue;
-#if 0
- if (OSPF6_LSA_SCOPE (lsa->header->type) == OSPF6_SCOPE_AS &&
- ospf6_is_interface_virtual_link (oi))
- continue;
-#endif /*0*/
-
ospf6_flood_clear_interface(lsa, oi);
}
}
diff --git a/ospf6d/ospf6_interface.c b/ospf6d/ospf6_interface.c
index 13e1b2abdc..621cc36a0c 100644
--- a/ospf6d/ospf6_interface.c
+++ b/ospf6d/ospf6_interface.c
@@ -250,6 +250,7 @@ void ospf6_interface_delete(struct ospf6_interface *oi)
THREAD_OFF(oi->thread_send_lsupdate);
THREAD_OFF(oi->thread_send_lsack);
THREAD_OFF(oi->thread_sso);
+ THREAD_OFF(oi->thread_wait_timer);
ospf6_lsdb_remove_all(oi->lsdb);
ospf6_lsdb_remove_all(oi->lsupdate_list);
@@ -304,6 +305,7 @@ void ospf6_interface_disable(struct ospf6_interface *oi)
THREAD_OFF(oi->thread_link_lsa);
THREAD_OFF(oi->thread_intra_prefix_lsa);
THREAD_OFF(oi->thread_as_extern_lsa);
+ THREAD_OFF(oi->thread_wait_timer);
}
static struct in6_addr *
@@ -793,7 +795,7 @@ int interface_up(struct thread *thread)
else {
ospf6_interface_state_change(OSPF6_INTERFACE_WAITING, oi);
thread_add_timer(master, wait_timer, oi, oi->dead_interval,
- NULL);
+ &oi->thread_wait_timer);
}
return 0;
@@ -1414,7 +1416,7 @@ DEFUN (show_ipv6_ospf6_interface_ifname_prefix,
[<\
detail\
|<X:X::X:X|X:X::X:X/M> [<match|detail>]\
- >]",
+ >] [json]",
SHOW_STR
IP6_STR
OSPF6_STR
@@ -1425,12 +1427,14 @@ DEFUN (show_ipv6_ospf6_interface_ifname_prefix,
OSPF6_ROUTE_ADDRESS_STR
OSPF6_ROUTE_PREFIX_STR
OSPF6_ROUTE_MATCH_STR
- "Display details of the prefixes\n")
+ "Display details of the prefixes\n"
+ JSON_STR)
{
int idx_ifname = 4;
int idx_prefix = 6;
struct interface *ifp;
struct ospf6_interface *oi;
+ bool uj = use_json(argc, argv);
ifp = if_lookup_by_name(argv[idx_ifname]->arg, VRF_DEFAULT);
if (ifp == NULL) {
@@ -1445,8 +1449,8 @@ DEFUN (show_ipv6_ospf6_interface_ifname_prefix,
return CMD_WARNING;
}
- ospf6_route_table_show(vty, idx_prefix, argc, argv,
- oi->route_connected);
+ ospf6_route_table_show(vty, idx_prefix, argc, argv, oi->route_connected,
+ uj);
return CMD_SUCCESS;
}
@@ -1457,7 +1461,7 @@ DEFUN (show_ipv6_ospf6_interface_prefix,
[<\
detail\
|<X:X::X:X|X:X::X:X/M> [<match|detail>]\
- >]",
+ >] [json]",
SHOW_STR
IP6_STR
OSPF6_STR
@@ -1467,12 +1471,14 @@ DEFUN (show_ipv6_ospf6_interface_prefix,
OSPF6_ROUTE_ADDRESS_STR
OSPF6_ROUTE_PREFIX_STR
OSPF6_ROUTE_MATCH_STR
- "Display details of the prefixes\n")
+ "Display details of the prefixes\n"
+ JSON_STR)
{
struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
int idx_prefix = 5;
struct ospf6_interface *oi;
struct interface *ifp;
+ bool uj = use_json(argc, argv);
FOR_ALL_INTERFACES (vrf, ifp) {
oi = (struct ospf6_interface *)ifp->info;
@@ -1480,7 +1486,7 @@ DEFUN (show_ipv6_ospf6_interface_prefix,
continue;
ospf6_route_table_show(vty, idx_prefix, argc, argv,
- oi->route_connected);
+ oi->route_connected, uj);
}
return CMD_SUCCESS;
diff --git a/ospf6d/ospf6_interface.h b/ospf6d/ospf6_interface.h
index dd7f4d1b1e..6e4692920c 100644
--- a/ospf6d/ospf6_interface.h
+++ b/ospf6d/ospf6_interface.h
@@ -111,6 +111,7 @@ struct ospf6_interface {
struct thread *thread_link_lsa;
struct thread *thread_intra_prefix_lsa;
struct thread *thread_as_extern_lsa;
+ struct thread *thread_wait_timer;
struct ospf6_route_table *route_connected;
diff --git a/ospf6d/ospf6_intra.c b/ospf6d/ospf6_intra.c
index 17538c466a..2abe64ac60 100644
--- a/ospf6d/ospf6_intra.c
+++ b/ospf6d/ospf6_intra.c
@@ -76,7 +76,8 @@ static char *ospf6_router_lsa_get_nbr_id(struct ospf6_lsa *lsa, char *buf,
*)(start
+ pos * (sizeof(struct
ospf6_router_lsdesc)));
- if ((char *)lsdesc < end) {
+ if ((char *)lsdesc + sizeof(struct ospf6_router_lsdesc)
+ <= end) {
if (buf && (buflen > INET_ADDRSTRLEN * 2)) {
inet_ntop(AF_INET,
&lsdesc->neighbor_interface_id, buf1,
@@ -84,20 +85,24 @@ static char *ospf6_router_lsa_get_nbr_id(struct ospf6_lsa *lsa, char *buf,
inet_ntop(AF_INET, &lsdesc->neighbor_router_id,
buf2, sizeof(buf2));
sprintf(buf, "%s/%s", buf2, buf1);
+
+ return buf;
}
- } else
- return NULL;
+ }
}
- return buf;
+ return NULL;
}
-static int ospf6_router_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
+static int ospf6_router_lsa_show(struct vty *vty, struct ospf6_lsa *lsa,
+ json_object *json_obj, bool use_json)
{
char *start, *end, *current;
char buf[32], name[32], bits[16], options[32];
struct ospf6_router_lsa *router_lsa;
struct ospf6_router_lsdesc *lsdesc;
+ json_object *json_arr;
+ json_object *json_loop;
router_lsa =
(struct ospf6_router_lsa *)((char *)lsa->header
@@ -105,7 +110,12 @@ static int ospf6_router_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
ospf6_capability_printbuf(router_lsa->bits, bits, sizeof(bits));
ospf6_options_printbuf(router_lsa->options, options, sizeof(options));
- vty_out(vty, " Bits: %s Options: %s\n", bits, options);
+ if (use_json) {
+ json_object_string_add(json_obj, "bits", bits);
+ json_object_string_add(json_obj, "options", options);
+ json_arr = json_object_new_array();
+ } else
+ vty_out(vty, " Bits: %s Options: %s\n", bits, options);
start = (char *)router_lsa + sizeof(struct ospf6_router_lsa);
end = (char *)lsa->header + ntohs(lsa->header->length);
@@ -126,18 +136,43 @@ static int ospf6_router_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
snprintf(name, sizeof(name), "Unknown (%#x)",
lsdesc->type);
- vty_out(vty, " Type: %s Metric: %d\n", name,
- ntohs(lsdesc->metric));
- vty_out(vty, " Interface ID: %s\n",
- inet_ntop(AF_INET, &lsdesc->interface_id, buf,
- sizeof(buf)));
- vty_out(vty, " Neighbor Interface ID: %s\n",
- inet_ntop(AF_INET, &lsdesc->neighbor_interface_id, buf,
- sizeof(buf)));
- vty_out(vty, " Neighbor Router ID: %s\n",
- inet_ntop(AF_INET, &lsdesc->neighbor_router_id, buf,
- sizeof(buf)));
+ if (use_json) {
+ json_loop = json_object_new_object();
+ json_object_string_add(json_loop, "type", name);
+ json_object_int_add(json_loop, "metric",
+ ntohs(lsdesc->metric));
+ json_object_string_add(json_loop, "interfaceId",
+ inet_ntop(AF_INET,
+ &lsdesc->interface_id,
+ buf, sizeof(buf)));
+ json_object_string_add(
+ json_loop, "neighborInterfaceId",
+ inet_ntop(AF_INET,
+ &lsdesc->neighbor_interface_id, buf,
+ sizeof(buf)));
+ json_object_string_add(
+ json_loop, "neighborRouterId",
+ inet_ntop(AF_INET, &lsdesc->neighbor_router_id,
+ buf, sizeof(buf)));
+ json_object_array_add(json_arr, json_loop);
+ } else {
+ vty_out(vty, " Type: %s Metric: %d\n", name,
+ ntohs(lsdesc->metric));
+ vty_out(vty, " Interface ID: %s\n",
+ inet_ntop(AF_INET, &lsdesc->interface_id, buf,
+ sizeof(buf)));
+ vty_out(vty, " Neighbor Interface ID: %s\n",
+ inet_ntop(AF_INET,
+ &lsdesc->neighbor_interface_id, buf,
+ sizeof(buf)));
+ vty_out(vty, " Neighbor Router ID: %s\n",
+ inet_ntop(AF_INET, &lsdesc->neighbor_router_id,
+ buf, sizeof(buf)));
+ }
}
+ if (use_json)
+ json_object_object_add(json_obj, "lsaDescription", json_arr);
+
return 0;
}
@@ -411,39 +446,55 @@ static char *ospf6_network_lsa_get_ar_id(struct ospf6_lsa *lsa, char *buf,
if ((current + sizeof(struct ospf6_network_lsdesc)) <= end) {
lsdesc = (struct ospf6_network_lsdesc *)current;
- if (buf)
+ if (buf) {
inet_ntop(AF_INET, &lsdesc->router_id, buf,
buflen);
- } else
- return NULL;
+ return buf;
+ }
+ }
}
- return (buf);
+ return NULL;
}
-static int ospf6_network_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
+static int ospf6_network_lsa_show(struct vty *vty, struct ospf6_lsa *lsa,
+ json_object *json_obj, bool use_json)
{
char *start, *end, *current;
struct ospf6_network_lsa *network_lsa;
struct ospf6_network_lsdesc *lsdesc;
char buf[128], options[32];
+ json_object *json_arr;
network_lsa =
(struct ospf6_network_lsa *)((caddr_t)lsa->header
+ sizeof(struct ospf6_lsa_header));
ospf6_options_printbuf(network_lsa->options, options, sizeof(options));
- vty_out(vty, " Options: %s\n", options);
+ if (use_json)
+ json_object_string_add(json_obj, "options", options);
+ else
+ vty_out(vty, " Options: %s\n", options);
start = (char *)network_lsa + sizeof(struct ospf6_network_lsa);
end = (char *)lsa->header + ntohs(lsa->header->length);
+ if (use_json)
+ json_arr = json_object_new_array();
+
for (current = start;
current + sizeof(struct ospf6_network_lsdesc) <= end;
current += sizeof(struct ospf6_network_lsdesc)) {
lsdesc = (struct ospf6_network_lsdesc *)current;
inet_ntop(AF_INET, &lsdesc->router_id, buf, sizeof(buf));
- vty_out(vty, " Attached Router: %s\n", buf);
+ if (use_json)
+ json_object_array_add(json_arr,
+ json_object_new_string(buf));
+ else
+ vty_out(vty, " Attached Router: %s\n", buf);
}
+ if (use_json)
+ json_object_object_add(json_obj, "attachedRouter", json_arr);
+
return 0;
}
@@ -602,7 +653,7 @@ static char *ospf6_link_lsa_get_prefix_str(struct ospf6_lsa *lsa, char *buf,
end = (char *)lsa->header + ntohs(lsa->header->length);
current = start;
- do {
+ while (current + sizeof(struct ospf6_prefix) <= end) {
prefix = (struct ospf6_prefix *)current;
if (prefix->prefix_length == 0
|| current + OSPF6_PREFIX_SIZE(prefix) > end) {
@@ -620,12 +671,13 @@ static char *ospf6_link_lsa_get_prefix_str(struct ospf6_lsa *lsa, char *buf,
inet_ntop(AF_INET6, &in6, buf, buflen);
return (buf);
}
- } while (current <= end);
+ }
}
return NULL;
}
-static int ospf6_link_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
+static int ospf6_link_lsa_show(struct vty *vty, struct ospf6_lsa *lsa,
+ json_object *json_obj, bool use_json)
{
char *start, *end, *current;
struct ospf6_link_lsa *link_lsa;
@@ -634,6 +686,10 @@ static int ospf6_link_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
struct ospf6_prefix *prefix;
const char *p, *mc, *la, *nu;
struct in6_addr in6;
+ json_object *json_loop;
+ json_object *json_arr = NULL;
+ char str[15];
+ char prefix_string[133];
link_lsa = (struct ospf6_link_lsa *)((caddr_t)lsa->header
+ sizeof(struct ospf6_lsa_header));
@@ -642,10 +698,18 @@ static int ospf6_link_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
inet_ntop(AF_INET6, &link_lsa->linklocal_addr, buf, sizeof(buf));
prefixnum = ntohl(link_lsa->prefix_num);
- vty_out(vty, " Priority: %d Options: %s\n", link_lsa->priority,
- options);
- vty_out(vty, " LinkLocal Address: %s\n", buf);
- vty_out(vty, " Number of Prefix: %d\n", prefixnum);
+ if (use_json) {
+ json_arr = json_object_new_array();
+ json_object_int_add(json_obj, "priority", link_lsa->priority);
+ json_object_string_add(json_obj, "options", options);
+ json_object_string_add(json_obj, "linkLocalAddress", buf);
+ json_object_int_add(json_obj, "numberOfPrefix", prefixnum);
+ } else {
+ vty_out(vty, " Priority: %d Options: %s\n",
+ link_lsa->priority, options);
+ vty_out(vty, " LinkLocal Address: %s\n", buf);
+ vty_out(vty, " Number of Prefix: %d\n", prefixnum);
+ }
start = (char *)link_lsa + sizeof(struct ospf6_link_lsa);
end = (char *)lsa->header + ntohs(lsa->header->length);
@@ -668,16 +732,31 @@ static int ospf6_link_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
nu = (CHECK_FLAG(prefix->prefix_options, OSPF6_PREFIX_OPTION_NU)
? "NU"
: "--");
- vty_out(vty, " Prefix Options: %s|%s|%s|%s\n", p, mc, la,
- nu);
+ if (use_json) {
+ json_loop = json_object_new_object();
+ snprintf(str, sizeof(str), "%s|%s|%s|%s", p, mc, la,
+ nu);
+ json_object_string_add(json_loop, "prefixOption", str);
+ } else
+ vty_out(vty, " Prefix Options: %s|%s|%s|%s\n", p,
+ mc, la, nu);
memset(&in6, 0, sizeof(in6));
memcpy(&in6, OSPF6_PREFIX_BODY(prefix),
OSPF6_PREFIX_SPACE(prefix->prefix_length));
inet_ntop(AF_INET6, &in6, buf, sizeof(buf));
- vty_out(vty, " Prefix: %s/%d\n", buf,
- prefix->prefix_length);
+ if (use_json) {
+ snprintf(prefix_string, sizeof(prefix_string), "%s/%d",
+ buf, prefix->prefix_length);
+ json_object_string_add(json_loop, "prefix",
+ prefix_string);
+ json_object_array_add(json_arr, json_loop);
+ } else
+ vty_out(vty, " Prefix: %s/%d\n", buf,
+ prefix->prefix_length);
}
+ if (use_json)
+ json_object_object_add(json_obj, "prefix", json_arr);
return 0;
}
@@ -803,7 +882,7 @@ static char *ospf6_intra_prefix_lsa_get_prefix_str(struct ospf6_lsa *lsa,
end = (char *)lsa->header + ntohs(lsa->header->length);
current = start;
- do {
+ while (current + sizeof(struct ospf6_prefix) <= end) {
prefix = (struct ospf6_prefix *)current;
if (prefix->prefix_length == 0
|| current + OSPF6_PREFIX_SIZE(prefix) > end) {
@@ -823,12 +902,13 @@ static char *ospf6_intra_prefix_lsa_get_prefix_str(struct ospf6_lsa *lsa,
prefix->prefix_length);
return (buf);
}
- } while (current <= end);
+ }
}
- return (buf);
+ return NULL;
}
-static int ospf6_intra_prefix_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
+static int ospf6_intra_prefix_lsa_show(struct vty *vty, struct ospf6_lsa *lsa,
+ json_object *json_obj, bool use_json)
{
char *start, *end, *current;
struct ospf6_intra_prefix_lsa *intra_prefix_lsa;
@@ -838,6 +918,10 @@ static int ospf6_intra_prefix_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
char id[16], adv_router[16];
const char *p, *mc, *la, *nu;
struct in6_addr in6;
+ json_object *json_loop;
+ json_object *json_arr = NULL;
+ char str[15];
+ char prefix_string[133];
intra_prefix_lsa = (struct ospf6_intra_prefix_lsa
*)((caddr_t)lsa->header
@@ -845,13 +929,25 @@ static int ospf6_intra_prefix_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
prefixnum = ntohs(intra_prefix_lsa->prefix_num);
- vty_out(vty, " Number of Prefix: %d\n", prefixnum);
+ if (use_json) {
+ json_arr = json_object_new_array();
+ json_object_int_add(json_obj, "numberOfPrefix", prefixnum);
+ } else
+ vty_out(vty, " Number of Prefix: %d\n", prefixnum);
inet_ntop(AF_INET, &intra_prefix_lsa->ref_id, id, sizeof(id));
inet_ntop(AF_INET, &intra_prefix_lsa->ref_adv_router, adv_router,
sizeof(adv_router));
- vty_out(vty, " Reference: %s Id: %s Adv: %s\n",
- ospf6_lstype_name(intra_prefix_lsa->ref_type), id, adv_router);
+ if (use_json) {
+ json_object_string_add(
+ json_obj, "reference",
+ ospf6_lstype_name(intra_prefix_lsa->ref_type));
+ json_object_string_add(json_obj, "referenceId", id);
+ json_object_string_add(json_obj, "referenceAdv", adv_router);
+ } else
+ vty_out(vty, " Reference: %s Id: %s Adv: %s\n",
+ ospf6_lstype_name(intra_prefix_lsa->ref_type), id,
+ adv_router);
start = (char *)intra_prefix_lsa
+ sizeof(struct ospf6_intra_prefix_lsa);
@@ -875,16 +971,31 @@ static int ospf6_intra_prefix_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
nu = (CHECK_FLAG(prefix->prefix_options, OSPF6_PREFIX_OPTION_NU)
? "NU"
: "--");
- vty_out(vty, " Prefix Options: %s|%s|%s|%s\n", p, mc, la,
- nu);
+ if (use_json) {
+ json_loop = json_object_new_object();
+ snprintf(str, sizeof(str), "%s|%s|%s|%s", p, mc, la,
+ nu);
+ json_object_string_add(json_loop, "prefixOption", str);
+ } else
+ vty_out(vty, " Prefix Options: %s|%s|%s|%s\n", p,
+ mc, la, nu);
memset(&in6, 0, sizeof(in6));
memcpy(&in6, OSPF6_PREFIX_BODY(prefix),
OSPF6_PREFIX_SPACE(prefix->prefix_length));
inet_ntop(AF_INET6, &in6, buf, sizeof(buf));
- vty_out(vty, " Prefix: %s/%d\n", buf,
- prefix->prefix_length);
+ if (use_json) {
+ snprintf(prefix_string, sizeof(prefix_string), "%s/%d",
+ buf, prefix->prefix_length);
+ json_object_string_add(json_loop, "prefix",
+ prefix_string);
+ json_object_array_add(json_arr, json_loop);
+ } else
+ vty_out(vty, " Prefix: %s/%d\n", buf,
+ prefix->prefix_length);
}
+ if (use_json)
+ json_object_object_add(json_obj, "prefix", json_arr);
return 0;
}
@@ -1338,6 +1449,8 @@ static void ospf6_intra_prefix_update_route_origin(struct ospf6_route *oa_route,
g_route->path.origin.id = h_path->origin.id;
g_route->path.origin.adv_router =
h_path->origin.adv_router;
+ if (nroute)
+ ospf6_route_unlock(nroute);
break;
}
}
diff --git a/ospf6d/ospf6_lsa.c b/ospf6d/ospf6_lsa.c
index 29141ee7f8..f1b04c9bec 100644
--- a/ospf6d/ospf6_lsa.c
+++ b/ospf6d/ospf6_lsa.c
@@ -66,7 +66,8 @@ struct ospf6 *ospf6_get_by_lsdb(struct ospf6_lsa *lsa)
return ospf6;
}
-static int ospf6_unknown_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
+static int ospf6_unknown_lsa_show(struct vty *vty, struct ospf6_lsa *lsa,
+ json_object *json_obj, bool use_json)
{
uint8_t *start, *end, *current;
char byte[4];
@@ -74,18 +75,22 @@ static int ospf6_unknown_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
start = (uint8_t *)lsa->header + sizeof(struct ospf6_lsa_header);
end = (uint8_t *)lsa->header + ntohs(lsa->header->length);
- vty_out(vty, " Unknown contents:\n");
- for (current = start; current < end; current++) {
- if ((current - start) % 16 == 0)
- vty_out(vty, "\n ");
- else if ((current - start) % 4 == 0)
- vty_out(vty, " ");
+ if (use_json)
+ json_object_string_add(json_obj, "LsaType", "unknown");
+ else {
+ vty_out(vty, " Unknown contents:\n");
+ for (current = start; current < end; current++) {
+ if ((current - start) % 16 == 0)
+ vty_out(vty, "\n ");
+ else if ((current - start) % 4 == 0)
+ vty_out(vty, " ");
+
+ snprintf(byte, sizeof(byte), "%02x", *current);
+ vty_out(vty, "%s", byte);
+ }
- snprintf(byte, sizeof(byte), "%02x", *current);
- vty_out(vty, "%s", byte);
+ vty_out(vty, "\n\n");
}
-
- vty_out(vty, "\n\n");
return 0;
}
@@ -392,13 +397,15 @@ void ospf6_lsa_show_summary_header(struct vty *vty)
"AdvRouter", "Age", "SeqNum", "Payload");
}
-void ospf6_lsa_show_summary(struct vty *vty, struct ospf6_lsa *lsa)
+void ospf6_lsa_show_summary(struct vty *vty, struct ospf6_lsa *lsa,
+ json_object *json_array, bool use_json)
{
char adv_router[16], id[16];
int type;
const struct ospf6_lsa_handler *handler;
- char buf[64], tmpbuf[80];
+ char buf[64];
int cnt = 0;
+ json_object *json_obj = NULL;
assert(lsa);
assert(lsa->header);
@@ -409,34 +416,95 @@ void ospf6_lsa_show_summary(struct vty *vty, struct ospf6_lsa *lsa)
type = ntohs(lsa->header->type);
handler = ospf6_get_lsa_handler(lsa->header->type);
+
+ if (use_json)
+ json_obj = json_object_new_object();
+
if ((type == OSPF6_LSTYPE_INTER_PREFIX)
|| (type == OSPF6_LSTYPE_INTER_ROUTER)
|| (type == OSPF6_LSTYPE_AS_EXTERNAL)) {
- vty_out(vty, "%-4s %-15s%-15s%4hu %8lx %30s\n",
- ospf6_lstype_short_name(lsa->header->type), id,
- adv_router, ospf6_lsa_age_current(lsa),
- (unsigned long)ntohl(lsa->header->seqnum),
- handler->lh_get_prefix_str(lsa, buf, sizeof(buf), 0));
+ if (use_json) {
+ json_object_string_add(
+ json_obj, "type",
+ ospf6_lstype_short_name(lsa->header->type));
+ json_object_string_add(json_obj, "lsId", id);
+ json_object_string_add(json_obj, "advRouter",
+ adv_router);
+ json_object_int_add(json_obj, "age",
+ ospf6_lsa_age_current(lsa));
+ json_object_int_add(
+ json_obj, "seqNum",
+ (unsigned long)ntohl(lsa->header->seqnum));
+ json_object_string_add(
+ json_obj, "payload",
+ handler->lh_get_prefix_str(lsa, buf,
+ sizeof(buf), 0));
+ json_object_array_add(json_array, json_obj);
+ } else
+ vty_out(vty, "%-4s %-15s%-15s%4hu %8lx %30s\n",
+ ospf6_lstype_short_name(lsa->header->type), id,
+ adv_router, ospf6_lsa_age_current(lsa),
+ (unsigned long)ntohl(lsa->header->seqnum),
+ handler->lh_get_prefix_str(lsa, buf,
+ sizeof(buf), 0));
} else if (type != OSPF6_LSTYPE_UNKNOWN) {
- snprintf(tmpbuf, sizeof(tmpbuf), "%-4s %-15s%-15s%4hu %8lx",
- ospf6_lstype_short_name(lsa->header->type), id,
- adv_router, ospf6_lsa_age_current(lsa),
- (unsigned long)ntohl(lsa->header->seqnum));
-
while (handler->lh_get_prefix_str(lsa, buf, sizeof(buf), cnt)
!= NULL) {
- vty_out(vty, "%s %30s\n", tmpbuf, buf);
+ if (use_json) {
+ json_object_string_add(
+ json_obj, "type",
+ ospf6_lstype_short_name(
+ lsa->header->type));
+ json_object_string_add(json_obj, "lsId", id);
+ json_object_string_add(json_obj, "advRouter",
+ adv_router);
+ json_object_int_add(json_obj, "age",
+ ospf6_lsa_age_current(lsa));
+ json_object_int_add(
+ json_obj, "seqNum",
+ (unsigned long)ntohl(
+ lsa->header->seqnum));
+ json_object_string_add(json_obj, "payload",
+ buf);
+ json_object_array_add(json_array, json_obj);
+ json_obj = json_object_new_object();
+ } else
+ vty_out(vty, "%-4s %-15s%-15s%4hu %8lx %30s\n",
+ ospf6_lstype_short_name(
+ lsa->header->type),
+ id, adv_router,
+ ospf6_lsa_age_current(lsa),
+ (unsigned long)ntohl(
+ lsa->header->seqnum),
+ buf);
cnt++;
}
+ if (use_json)
+ json_object_free(json_obj);
} else {
- vty_out(vty, "%-4s %-15s%-15s%4hu %8lx\n",
- ospf6_lstype_short_name(lsa->header->type), id,
- adv_router, ospf6_lsa_age_current(lsa),
- (unsigned long)ntohl(lsa->header->seqnum));
+ if (use_json) {
+ json_object_string_add(
+ json_obj, "type",
+ ospf6_lstype_short_name(lsa->header->type));
+ json_object_string_add(json_obj, "lsId", id);
+ json_object_string_add(json_obj, "advRouter",
+ adv_router);
+ json_object_int_add(json_obj, "age",
+ ospf6_lsa_age_current(lsa));
+ json_object_int_add(
+ json_obj, "seqNum",
+ (unsigned long)ntohl(lsa->header->seqnum));
+ json_object_array_add(json_array, json_obj);
+ } else
+ vty_out(vty, "%-4s %-15s%-15s%4hu %8lx\n",
+ ospf6_lstype_short_name(lsa->header->type), id,
+ adv_router, ospf6_lsa_age_current(lsa),
+ (unsigned long)ntohl(lsa->header->seqnum));
}
}
-void ospf6_lsa_show_dump(struct vty *vty, struct ospf6_lsa *lsa)
+void ospf6_lsa_show_dump(struct vty *vty, struct ospf6_lsa *lsa,
+ json_object *json_array, bool use_json)
{
uint8_t *start, *end, *current;
char byte[4];
@@ -444,6 +512,9 @@ void ospf6_lsa_show_dump(struct vty *vty, struct ospf6_lsa *lsa)
start = (uint8_t *)lsa->header;
end = (uint8_t *)lsa->header + ntohs(lsa->header->length);
+ if (use_json)
+ return;
+
vty_out(vty, "\n");
vty_out(vty, "%s:\n", lsa->name);
@@ -458,12 +529,15 @@ void ospf6_lsa_show_dump(struct vty *vty, struct ospf6_lsa *lsa)
}
vty_out(vty, "\n\n");
+
return;
}
-void ospf6_lsa_show_internal(struct vty *vty, struct ospf6_lsa *lsa)
+void ospf6_lsa_show_internal(struct vty *vty, struct ospf6_lsa *lsa,
+ json_object *json_array, bool use_json)
{
char adv_router[64], id[64];
+ json_object *json_obj;
assert(lsa && lsa->header);
@@ -471,30 +545,56 @@ void ospf6_lsa_show_internal(struct vty *vty, struct ospf6_lsa *lsa)
inet_ntop(AF_INET, &lsa->header->adv_router, adv_router,
sizeof(adv_router));
- vty_out(vty, "\n");
- vty_out(vty, "Age: %4hu Type: %s\n", ospf6_lsa_age_current(lsa),
- ospf6_lstype_name(lsa->header->type));
- vty_out(vty, "Link State ID: %s\n", id);
- vty_out(vty, "Advertising Router: %s\n", adv_router);
- vty_out(vty, "LS Sequence Number: %#010lx\n",
- (unsigned long)ntohl(lsa->header->seqnum));
- vty_out(vty, "CheckSum: %#06hx Length: %hu\n",
- ntohs(lsa->header->checksum), ntohs(lsa->header->length));
- vty_out(vty, "Flag: %x \n", lsa->flag);
- vty_out(vty, "Lock: %d \n", lsa->lock);
- vty_out(vty, "ReTx Count: %d\n", lsa->retrans_count);
- vty_out(vty, "Threads: Expire: 0x%p, Refresh: 0x%p \n",
- (void *)lsa->expire, (void *)lsa->refresh);
- vty_out(vty, "\n");
+ if (use_json) {
+ json_obj = json_object_new_object();
+ json_object_int_add(json_obj, "age",
+ ospf6_lsa_age_current(lsa));
+ json_object_string_add(json_obj, "type",
+ ospf6_lstype_name(lsa->header->type));
+ json_object_string_add(json_obj, "linkStateId", id);
+ json_object_string_add(json_obj, "advertisingRouter",
+ adv_router);
+ json_object_int_add(json_obj, "lsSequenceNumber",
+ (unsigned long)ntohl(lsa->header->seqnum));
+ json_object_int_add(json_obj, "checksum",
+ ntohs(lsa->header->checksum));
+ json_object_int_add(json_obj, "length",
+ ntohs(lsa->header->length));
+ json_object_int_add(json_obj, "flag", lsa->flag);
+ json_object_int_add(json_obj, "lock", lsa->lock);
+ json_object_int_add(json_obj, "reTxCount", lsa->retrans_count);
+
+ /* Threads Data not added */
+ json_object_array_add(json_array, json_obj);
+ } else {
+ vty_out(vty, "\n");
+ vty_out(vty, "Age: %4hu Type: %s\n", ospf6_lsa_age_current(lsa),
+ ospf6_lstype_name(lsa->header->type));
+ vty_out(vty, "Link State ID: %s\n", id);
+ vty_out(vty, "Advertising Router: %s\n", adv_router);
+ vty_out(vty, "LS Sequence Number: %#010lx\n",
+ (unsigned long)ntohl(lsa->header->seqnum));
+ vty_out(vty, "CheckSum: %#06hx Length: %hu\n",
+ ntohs(lsa->header->checksum),
+ ntohs(lsa->header->length));
+ vty_out(vty, "Flag: %x \n", lsa->flag);
+ vty_out(vty, "Lock: %d \n", lsa->lock);
+ vty_out(vty, "ReTx Count: %d\n", lsa->retrans_count);
+ vty_out(vty, "Threads: Expire: 0x%p, Refresh: 0x%p \n",
+ (void *)lsa->expire, (void *)lsa->refresh);
+ vty_out(vty, "\n");
+ }
return;
}
-void ospf6_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
+void ospf6_lsa_show(struct vty *vty, struct ospf6_lsa *lsa,
+ json_object *json_array, bool use_json)
{
char adv_router[64], id[64];
const struct ospf6_lsa_handler *handler;
struct timeval now, res;
char duration[64];
+ json_object *json_obj = NULL;
assert(lsa && lsa->header);
@@ -505,27 +605,47 @@ void ospf6_lsa_show(struct vty *vty, struct ospf6_lsa *lsa)
monotime(&now);
timersub(&now, &lsa->installed, &res);
timerstring(&res, duration, sizeof(duration));
-
- vty_out(vty, "Age: %4hu Type: %s\n", ospf6_lsa_age_current(lsa),
- ospf6_lstype_name(lsa->header->type));
- vty_out(vty, "Link State ID: %s\n", id);
- vty_out(vty, "Advertising Router: %s\n", adv_router);
- vty_out(vty, "LS Sequence Number: %#010lx\n",
- (unsigned long)ntohl(lsa->header->seqnum));
- vty_out(vty, "CheckSum: %#06hx Length: %hu\n",
- ntohs(lsa->header->checksum), ntohs(lsa->header->length));
- vty_out(vty, "Duration: %s\n", duration);
+ if (use_json) {
+ json_obj = json_object_new_object();
+ json_object_int_add(json_obj, "age",
+ ospf6_lsa_age_current(lsa));
+ json_object_string_add(json_obj, "type",
+ ospf6_lstype_name(lsa->header->type));
+ json_object_string_add(json_obj, "advertisingRouter",
+ adv_router);
+ json_object_int_add(json_obj, "lsSequenceNumber",
+ (unsigned long)ntohl(lsa->header->seqnum));
+ json_object_int_add(json_obj, "checkSum",
+ ntohs(lsa->header->checksum));
+ json_object_int_add(json_obj, "length",
+ ntohs(lsa->header->length));
+ json_object_string_add(json_obj, "duration", duration);
+ } else {
+ vty_out(vty, "Age: %4hu Type: %s\n", ospf6_lsa_age_current(lsa),
+ ospf6_lstype_name(lsa->header->type));
+ vty_out(vty, "Link State ID: %s\n", id);
+ vty_out(vty, "Advertising Router: %s\n", adv_router);
+ vty_out(vty, "LS Sequence Number: %#010lx\n",
+ (unsigned long)ntohl(lsa->header->seqnum));
+ vty_out(vty, "CheckSum: %#06hx Length: %hu\n",
+ ntohs(lsa->header->checksum),
+ ntohs(lsa->header->length));
+ vty_out(vty, "Duration: %s\n", duration);
+ }
handler = ospf6_get_lsa_handler(lsa->header->type);
if (handler->lh_show != NULL)
- handler->lh_show(vty, lsa);
+ handler->lh_show(vty, lsa, json_obj, use_json);
else {
assert(unknown_handler.lh_show != NULL);
- unknown_handler.lh_show(vty, lsa);
+ unknown_handler.lh_show(vty, lsa, json_obj, use_json);
}
- vty_out(vty, "\n");
+ if (use_json)
+ json_object_array_add(json_array, json_obj);
+ else
+ vty_out(vty, "\n");
}
/* OSPFv3 LSA creation/deletion function */
diff --git a/ospf6d/ospf6_lsa.h b/ospf6d/ospf6_lsa.h
index 814e276796..7fa9c5fe40 100644
--- a/ospf6d/ospf6_lsa.h
+++ b/ospf6d/ospf6_lsa.h
@@ -21,6 +21,7 @@
#ifndef OSPF6_LSA_H
#define OSPF6_LSA_H
#include "ospf6_top.h"
+#include "lib/json.h"
/* Debug option */
#define OSPF6_LSA_DEBUG 0x01
@@ -141,9 +142,10 @@ struct ospf6_lsa_handler {
uint16_t lh_type; /* host byte order */
const char *lh_name;
const char *lh_short_name;
- int (*lh_show)(struct vty *, struct ospf6_lsa *);
- char *(*lh_get_prefix_str)(struct ospf6_lsa *, char *buf,
- int buflen, int pos);
+ int (*lh_show)(struct vty *, struct ospf6_lsa *, json_object *json_obj,
+ bool use_json);
+ char *(*lh_get_prefix_str)(struct ospf6_lsa *, char *buf, int buflen,
+ int pos);
uint8_t lh_debug;
};
@@ -206,10 +208,14 @@ extern char *ospf6_lsa_printbuf(struct ospf6_lsa *lsa, char *buf, int size);
extern void ospf6_lsa_header_print_raw(struct ospf6_lsa_header *header);
extern void ospf6_lsa_header_print(struct ospf6_lsa *lsa);
extern void ospf6_lsa_show_summary_header(struct vty *vty);
-extern void ospf6_lsa_show_summary(struct vty *vty, struct ospf6_lsa *lsa);
-extern void ospf6_lsa_show_dump(struct vty *vty, struct ospf6_lsa *lsa);
-extern void ospf6_lsa_show_internal(struct vty *vty, struct ospf6_lsa *lsa);
-extern void ospf6_lsa_show(struct vty *vty, struct ospf6_lsa *lsa);
+extern void ospf6_lsa_show_summary(struct vty *vty, struct ospf6_lsa *lsa,
+ json_object *json, bool use_json);
+extern void ospf6_lsa_show_dump(struct vty *vty, struct ospf6_lsa *lsa,
+ json_object *json, bool use_json);
+extern void ospf6_lsa_show_internal(struct vty *vty, struct ospf6_lsa *lsa,
+ json_object *json, bool use_json);
+extern void ospf6_lsa_show(struct vty *vty, struct ospf6_lsa *lsa,
+ json_object *json, bool use_json);
extern struct ospf6_lsa *ospf6_lsa_create(struct ospf6_lsa_header *header);
extern struct ospf6_lsa *
diff --git a/ospf6d/ospf6_lsdb.c b/ospf6d/ospf6_lsdb.c
index c136c558cb..9636e1a230 100644
--- a/ospf6d/ospf6_lsdb.c
+++ b/ospf6d/ospf6_lsdb.c
@@ -346,55 +346,6 @@ int ospf6_lsdb_maxage_remover(struct ospf6_lsdb *lsdb)
return (reschedule);
}
-void ospf6_lsdb_show(struct vty *vty, enum ospf_lsdb_show_level level,
- uint16_t *type, uint32_t *id, uint32_t *adv_router,
- struct ospf6_lsdb *lsdb)
-{
- struct ospf6_lsa *lsa;
- const struct route_node *end = NULL;
- void (*showfunc)(struct vty *, struct ospf6_lsa *) = NULL;
-
- switch (level) {
- case OSPF6_LSDB_SHOW_LEVEL_DETAIL:
- showfunc = ospf6_lsa_show;
- break;
- case OSPF6_LSDB_SHOW_LEVEL_INTERNAL:
- showfunc = ospf6_lsa_show_internal;
- break;
- case OSPF6_LSDB_SHOW_LEVEL_DUMP:
- showfunc = ospf6_lsa_show_dump;
- break;
- case OSPF6_LSDB_SHOW_LEVEL_NORMAL:
- default:
- showfunc = ospf6_lsa_show_summary;
- }
-
- if (type && id && adv_router) {
- lsa = ospf6_lsdb_lookup(*type, *id, *adv_router, lsdb);
- if (lsa) {
- if (level == OSPF6_LSDB_SHOW_LEVEL_NORMAL)
- ospf6_lsa_show(vty, lsa);
- else
- (*showfunc)(vty, lsa);
- }
- return;
- }
-
- if (level == OSPF6_LSDB_SHOW_LEVEL_NORMAL)
- ospf6_lsa_show_summary_header(vty);
-
- end = ospf6_lsdb_head(lsdb, !!type + !!(type && adv_router),
- type ? *type : 0, adv_router ? *adv_router : 0,
- &lsa);
- while (lsa) {
- if ((!adv_router || lsa->header->adv_router == *adv_router)
- && (!id || lsa->header->id == *id))
- (*showfunc)(vty, lsa);
-
- lsa = ospf6_lsdb_next(end, lsa);
- }
-}
-
uint32_t ospf6_new_ls_id(uint16_t type, uint32_t adv_router,
struct ospf6_lsdb *lsdb)
{
diff --git a/ospf6d/ospf6_lsdb.h b/ospf6d/ospf6_lsdb.h
index 457e3dc4e4..7a62c46b02 100644
--- a/ospf6d/ospf6_lsdb.h
+++ b/ospf6d/ospf6_lsdb.h
@@ -92,7 +92,8 @@ enum ospf_lsdb_show_level {
extern void ospf6_lsdb_show(struct vty *vty, enum ospf_lsdb_show_level level,
uint16_t *type, uint32_t *id, uint32_t *adv_router,
- struct ospf6_lsdb *lsdb);
+ struct ospf6_lsdb *lsdb, json_object *json,
+ bool use_json);
extern uint32_t ospf6_new_ls_id(uint16_t type, uint32_t adv_router,
struct ospf6_lsdb *lsdb);
diff --git a/ospf6d/ospf6_route.c b/ospf6d/ospf6_route.c
index 60c208437b..b77f968179 100644
--- a/ospf6d/ospf6_route.c
+++ b/ospf6d/ospf6_route.c
@@ -163,6 +163,10 @@ const char *const ospf6_path_type_substr[OSPF6_PATH_TYPE_MAX] = {
"??", "IA", "IE", "E1", "E2",
};
+const char *ospf6_path_type_json[OSPF6_PATH_TYPE_MAX] = {
+ "UnknownRoute", "IntraArea", "InterArea", "External1", "External2",
+};
+
struct ospf6_nexthop *ospf6_nexthop_create(void)
{
@@ -1030,7 +1034,8 @@ void ospf6_route_table_delete(struct ospf6_route_table *table)
/* VTY commands */
-void ospf6_route_show(struct vty *vty, struct ospf6_route *route)
+void ospf6_route_show(struct vty *vty, struct ospf6_route *route,
+ json_object *json_array_routes, bool use_json)
{
int i;
char destination[PREFIX2STR_BUFFER], nexthop[64];
@@ -1038,6 +1043,9 @@ void ospf6_route_show(struct vty *vty, struct ospf6_route *route)
struct timeval now, res;
struct listnode *node;
struct ospf6_nexthop *nh;
+ json_object *json_route = NULL;
+ json_object *json_array_next_hops = NULL;
+ json_object *json_next_hop;
if (om6->ospf6 == NULL) {
vty_out(vty, "OSPFv3 is not running\n");
@@ -1058,34 +1066,74 @@ void ospf6_route_show(struct vty *vty, struct ospf6_route *route)
else
prefix2str(&route->prefix, destination, sizeof(destination));
- i = 0;
+ if (use_json) {
+ json_route = json_object_new_object();
+ json_object_string_add(json_route, "destination", destination);
+ json_object_boolean_add(json_route, "isBestRoute",
+ ospf6_route_is_best(route));
+ json_object_string_add(json_route, "destinationType",
+ OSPF6_DEST_TYPE_SUBSTR(route->type));
+ json_object_string_add(
+ json_route, "pathType",
+ OSPF6_PATH_TYPE_SUBSTR(route->path.type));
+ json_object_string_add(json_route, "duration", duration);
+ }
+
+ /* Nexthops */
+ if (use_json)
+ json_array_next_hops = json_object_new_array();
+ else
+ i = 0;
for (ALL_LIST_ELEMENTS_RO(route->nh_list, node, nh)) {
struct interface *ifp;
/* nexthop */
inet_ntop(AF_INET6, &nh->address, nexthop, sizeof(nexthop));
ifp = if_lookup_by_index_all_vrf(nh->ifindex);
- if (!i) {
- vty_out(vty, "%c%1s %2s %-30s %-25s %6.*s %s\n",
- (ospf6_route_is_best(route) ? '*' : ' '),
- OSPF6_DEST_TYPE_SUBSTR(route->type),
- OSPF6_PATH_TYPE_SUBSTR(route->path.type),
- destination, nexthop, IFNAMSIZ, ifp->name,
- duration);
- i++;
- } else
- vty_out(vty, "%c%1s %2s %-30s %-25s %6.*s %s\n", ' ',
- "", "", "", nexthop, IFNAMSIZ, ifp->name, "");
+ if (use_json) {
+ json_next_hop = json_object_new_object();
+ json_object_string_add(json_next_hop, "nextHop",
+ nexthop);
+ json_object_string_add(json_next_hop, "interfaceName",
+ ifp->name);
+ json_object_array_add(json_array_next_hops,
+ json_next_hop);
+ } else {
+ if (!i) {
+ vty_out(vty, "%c%1s %2s %-30s %-25s %6.*s %s\n",
+ (ospf6_route_is_best(route) ? '*'
+ : ' '),
+ OSPF6_DEST_TYPE_SUBSTR(route->type),
+ OSPF6_PATH_TYPE_SUBSTR(
+ route->path.type),
+ destination, nexthop, IFNAMSIZ,
+ ifp->name, duration);
+ i++;
+ } else
+ vty_out(vty, "%c%1s %2s %-30s %-25s %6.*s %s\n",
+ ' ', "", "", "", nexthop, IFNAMSIZ,
+ ifp->name, "");
+ }
+ }
+ if (use_json) {
+ json_object_object_add(json_route, "nextHops",
+ json_array_next_hops);
+ json_object_array_add(json_array_routes, json_route);
}
}
-void ospf6_route_show_detail(struct vty *vty, struct ospf6_route *route)
+void ospf6_route_show_detail(struct vty *vty, struct ospf6_route *route,
+ json_object *json_array_routes, bool use_json)
{
- char destination[PREFIX2STR_BUFFER];
+ char destination[PREFIX2STR_BUFFER], nexthop[64];
char area_id[16], id[16], adv_router[16], capa[16], options[16];
struct timeval now, res;
char duration[64];
struct listnode *node;
struct ospf6_nexthop *nh;
+ char flag[6];
+ json_object *json_route = NULL;
+ json_object *json_array_next_hops = NULL;
+ json_object *json_next_hop;
if (om6->ospf6 == NULL) {
vty_out(vty, "OSPFv3 is not running\n");
@@ -1103,84 +1151,177 @@ void ospf6_route_show_detail(struct vty *vty, struct ospf6_route *route)
destination, sizeof(destination));
else
prefix2str(&route->prefix, destination, sizeof(destination));
- vty_out(vty, "Destination: %s\n", destination);
- /* destination type */
- vty_out(vty, "Destination type: %s\n",
- OSPF6_DEST_TYPE_NAME(route->type));
+ if (use_json) {
+ json_route = json_object_new_object();
+ json_object_string_add(json_route, "destination", destination);
+ json_object_string_add(json_route, "destinationType",
+ OSPF6_DEST_TYPE_NAME(route->type));
+ } else {
+ vty_out(vty, "Destination: %s\n", destination);
+ vty_out(vty, "Destination type: %s\n",
+ OSPF6_DEST_TYPE_NAME(route->type));
+ }
/* Time */
timersub(&now, &route->installed, &res);
timerstring(&res, duration, sizeof(duration));
- vty_out(vty, "Installed Time: %s ago\n", duration);
+ if (use_json)
+ json_object_string_add(json_route, "installedTimeSince",
+ duration);
+ else
+ vty_out(vty, "Installed Time: %s ago\n", duration);
timersub(&now, &route->changed, &res);
timerstring(&res, duration, sizeof(duration));
- vty_out(vty, " Changed Time: %s ago\n", duration);
+ if (use_json)
+ json_object_string_add(json_route, "changedTimeSince",
+ duration);
+ else
+ vty_out(vty, "Changed Time: %s ago\n", duration);
/* Debugging info */
- vty_out(vty, "Lock: %d Flags: %s%s%s%s\n", route->lock,
- (CHECK_FLAG(route->flag, OSPF6_ROUTE_BEST) ? "B" : "-"),
- (CHECK_FLAG(route->flag, OSPF6_ROUTE_ADD) ? "A" : "-"),
- (CHECK_FLAG(route->flag, OSPF6_ROUTE_REMOVE) ? "R" : "-"),
- (CHECK_FLAG(route->flag, OSPF6_ROUTE_CHANGE) ? "C" : "-"));
- vty_out(vty, "Memory: prev: %p this: %p next: %p\n",
- (void *)route->prev, (void *)route, (void *)route->next);
+ if (use_json) {
+ json_object_int_add(json_route, "numberOfLock", route->lock);
+ snprintf(
+ flag, sizeof(flag), "%s%s%s%s",
+ (CHECK_FLAG(route->flag, OSPF6_ROUTE_BEST) ? "B" : "-"),
+ (CHECK_FLAG(route->flag, OSPF6_ROUTE_ADD) ? "A" : "-"),
+ (CHECK_FLAG(route->flag, OSPF6_ROUTE_REMOVE) ? "R"
+ : "-"),
+ (CHECK_FLAG(route->flag, OSPF6_ROUTE_CHANGE) ? "C"
+ : "-"));
+ json_object_string_add(json_route, "flags", flag);
+ } else {
+ vty_out(vty, "Lock: %d Flags: %s%s%s%s\n", route->lock,
+ (CHECK_FLAG(route->flag, OSPF6_ROUTE_BEST) ? "B" : "-"),
+ (CHECK_FLAG(route->flag, OSPF6_ROUTE_ADD) ? "A" : "-"),
+ (CHECK_FLAG(route->flag, OSPF6_ROUTE_REMOVE) ? "R"
+ : "-"),
+ (CHECK_FLAG(route->flag, OSPF6_ROUTE_CHANGE) ? "C"
+ : "-"));
+ vty_out(vty, "Memory: prev: %p this: %p next: %p\n",
+ (void *)route->prev, (void *)route,
+ (void *)route->next);
+ }
/* Path section */
/* Area-ID */
inet_ntop(AF_INET, &route->path.area_id, area_id, sizeof(area_id));
- vty_out(vty, "Associated Area: %s\n", area_id);
+ if (use_json)
+ json_object_string_add(json_route, "associatedArea", area_id);
+ else
+ vty_out(vty, "Associated Area: %s\n", area_id);
/* Path type */
- vty_out(vty, "Path Type: %s\n", OSPF6_PATH_TYPE_NAME(route->path.type));
+ if (use_json)
+ json_object_string_add(json_route, "pathType",
+ OSPF6_PATH_TYPE_NAME(route->path.type));
+ else
+ vty_out(vty, "Path Type: %s\n",
+ OSPF6_PATH_TYPE_NAME(route->path.type));
/* LS Origin */
inet_ntop(AF_INET, &route->path.origin.id, id, sizeof(id));
inet_ntop(AF_INET, &route->path.origin.adv_router, adv_router,
sizeof(adv_router));
- vty_out(vty, "LS Origin: %s Id: %s Adv: %s\n",
- ospf6_lstype_name(route->path.origin.type), id, adv_router);
+ if (use_json) {
+ json_object_string_add(
+ json_route, "lsOriginRoutePathType",
+ ospf6_lstype_name(route->path.origin.type));
+ json_object_string_add(json_route, "lsId", id);
+ json_object_string_add(json_route, "lsAdvertisingRouter",
+ adv_router);
+ } else {
+ vty_out(vty, "LS Origin: %s Id: %s Adv: %s\n",
+ ospf6_lstype_name(route->path.origin.type), id,
+ adv_router);
+ }
/* Options */
ospf6_options_printbuf(route->path.options, options, sizeof(options));
- vty_out(vty, "Options: %s\n", options);
+ if (use_json)
+ json_object_string_add(json_route, "options", options);
+ else
+ vty_out(vty, "Options: %s\n", options);
/* Router Bits */
ospf6_capability_printbuf(route->path.router_bits, capa, sizeof(capa));
- vty_out(vty, "Router Bits: %s\n", capa);
+ if (use_json)
+ json_object_string_add(json_route, "routerBits", capa);
+ else
+ vty_out(vty, "Router Bits: %s\n", capa);
/* Prefix Options */
- vty_out(vty, "Prefix Options: xxx\n");
+ if (use_json)
+ json_object_string_add(json_route, "prefixOptions", "xxx");
+ else
+ vty_out(vty, "Prefix Options: xxx\n");
/* Metrics */
- vty_out(vty, "Metric Type: %d\n", route->path.metric_type);
- vty_out(vty, "Metric: %d (%d)\n", route->path.cost,
- route->path.u.cost_e2);
+ if (use_json) {
+ json_object_int_add(json_route, "metricType",
+ route->path.metric_type);
+ json_object_int_add(json_route, "metricCost", route->path.cost);
+ json_object_int_add(json_route, "metricCostE2",
+ route->path.u.cost_e2);
+
+ json_object_int_add(json_route, "pathsCount",
+ route->paths->count);
+ json_object_int_add(json_route, "nextHopCount",
+ route->nh_list->count);
+ } else {
+ vty_out(vty, "Metric Type: %d\n", route->path.metric_type);
+ vty_out(vty, "Metric: %d (%d)\n", route->path.cost,
+ route->path.u.cost_e2);
+
+ vty_out(vty, "Paths count: %u\n", route->paths->count);
+ vty_out(vty, "Nexthop count: %u\n", route->nh_list->count);
+ }
- vty_out(vty, "Paths count: %u\n", route->paths->count);
- vty_out(vty, "Nexthop count: %u\n", route->nh_list->count);
/* Nexthops */
- vty_out(vty, "Nexthop:\n");
+ if (use_json)
+ json_array_next_hops = json_object_new_array();
+ else
+ vty_out(vty, "Nexthop:\n");
+
for (ALL_LIST_ELEMENTS_RO(route->nh_list, node, nh)) {
struct interface *ifp;
- /* nexthop */
-
ifp = if_lookup_by_index_all_vrf(nh->ifindex);
- vty_out(vty, " %pI6 %.*s\n", &nh->address, IFNAMSIZ, ifp->name);
+ /* nexthop */
+ if (use_json) {
+ inet_ntop(AF_INET6, &nh->address, nexthop,
+ sizeof(nexthop));
+ json_next_hop = json_object_new_object();
+ json_object_string_add(json_next_hop, "nextHop",
+ nexthop);
+ json_object_string_add(json_next_hop, "interfaceName",
+ ifp->name);
+ json_object_array_add(json_array_next_hops,
+ json_next_hop);
+ } else
+ vty_out(vty, " %pI6 %.*s\n", &nh->address, IFNAMSIZ,
+ ifp->name);
}
- vty_out(vty, "\n");
+ if (use_json) {
+ json_object_object_add(json_route, "nextHops",
+ json_array_next_hops);
+ json_object_array_add(json_array_routes, json_route);
+ } else
+ vty_out(vty, "\n");
}
static void ospf6_route_show_table_summary(struct vty *vty,
- struct ospf6_route_table *table)
+ struct ospf6_route_table *table,
+ json_object *json, bool use_json)
{
struct ospf6_route *route, *prev = NULL;
int i, pathtype[OSPF6_PATH_TYPE_MAX];
unsigned int number = 0;
int nh_count = 0, nhinval = 0, ecmp = 0;
int alternative = 0, destination = 0;
+ char path_str[30];
for (i = 0; i < OSPF6_PATH_TYPE_MAX; i++)
pathtype[i] = 0;
@@ -1203,111 +1344,164 @@ static void ospf6_route_show_table_summary(struct vty *vty,
}
assert(number == table->count);
-
- vty_out(vty, "Number of OSPFv3 routes: %d\n", number);
- vty_out(vty, "Number of Destination: %d\n", destination);
- vty_out(vty, "Number of Alternative routes: %d\n", alternative);
- vty_out(vty, "Number of Equal Cost Multi Path: %d\n", ecmp);
+ if (use_json) {
+ json_object_int_add(json, "numberOfOspfv3Routes", number);
+ json_object_int_add(json, "numberOfDestination", destination);
+ json_object_int_add(json, "numberOfAlternativeRoutes",
+ alternative);
+ json_object_int_add(json, "numberOfEcmp", ecmp);
+ } else {
+ vty_out(vty, "Number of OSPFv3 routes: %d\n", number);
+ vty_out(vty, "Number of Destination: %d\n", destination);
+ vty_out(vty, "Number of Alternative routes: %d\n", alternative);
+ vty_out(vty, "Number of Equal Cost Multi Path: %d\n", ecmp);
+ }
for (i = OSPF6_PATH_TYPE_INTRA; i <= OSPF6_PATH_TYPE_EXTERNAL2; i++) {
- vty_out(vty, "Number of %s routes: %d\n",
- OSPF6_PATH_TYPE_NAME(i), pathtype[i]);
+ if (use_json) {
+ snprintf(path_str, sizeof(path_str), "numberOf%sRoutes",
+ OSPF6_PATH_TYPE_JSON(i));
+ json_object_int_add(json, path_str, pathtype[i]);
+ } else
+ vty_out(vty, "Number of %s routes: %d\n",
+ OSPF6_PATH_TYPE_NAME(i), pathtype[i]);
}
}
static void ospf6_route_show_table_prefix(struct vty *vty,
struct prefix *prefix,
- struct ospf6_route_table *table)
+ struct ospf6_route_table *table,
+ json_object *json, bool use_json)
{
struct ospf6_route *route;
+ json_object *json_array_routes = NULL;
route = ospf6_route_lookup(prefix, table);
if (route == NULL)
return;
+ if (use_json)
+ json_array_routes = json_object_new_array();
ospf6_route_lock(route);
while (route && ospf6_route_is_prefix(prefix, route)) {
/* Specifying a prefix will always display details */
- ospf6_route_show_detail(vty, route);
+ ospf6_route_show_detail(vty, route, json_array_routes,
+ use_json);
route = ospf6_route_next(route);
}
+
+ if (use_json)
+ json_object_object_add(json, "routes", json_array_routes);
if (route)
ospf6_route_unlock(route);
}
static void ospf6_route_show_table_address(struct vty *vty,
struct prefix *prefix,
- struct ospf6_route_table *table)
+ struct ospf6_route_table *table,
+ json_object *json, bool use_json)
{
struct ospf6_route *route;
+ json_object *json_array_routes = NULL;
route = ospf6_route_lookup_bestmatch(prefix, table);
if (route == NULL)
return;
+ if (use_json)
+ json_array_routes = json_object_new_array();
prefix = &route->prefix;
ospf6_route_lock(route);
while (route && ospf6_route_is_prefix(prefix, route)) {
/* Specifying a prefix will always display details */
- ospf6_route_show_detail(vty, route);
+ ospf6_route_show_detail(vty, route, json_array_routes,
+ use_json);
route = ospf6_route_next(route);
}
+ if (use_json)
+ json_object_object_add(json, "routes", json_array_routes);
if (route)
ospf6_route_unlock(route);
}
static void ospf6_route_show_table_match(struct vty *vty, int detail,
struct prefix *prefix,
- struct ospf6_route_table *table)
+ struct ospf6_route_table *table,
+ json_object *json, bool use_json)
{
struct ospf6_route *route;
+ json_object *json_array_routes = NULL;
+
assert(prefix->family);
route = ospf6_route_match_head(prefix, table);
+ if (use_json)
+ json_array_routes = json_object_new_array();
while (route) {
if (detail)
- ospf6_route_show_detail(vty, route);
+ ospf6_route_show_detail(vty, route, json_array_routes,
+ use_json);
else
- ospf6_route_show(vty, route);
+ ospf6_route_show(vty, route, json_array_routes,
+ use_json);
route = ospf6_route_match_next(prefix, route);
}
+ if (use_json)
+ json_object_object_add(json, "routes", json_array_routes);
}
static void ospf6_route_show_table_type(struct vty *vty, int detail,
uint8_t type,
- struct ospf6_route_table *table)
+ struct ospf6_route_table *table,
+ json_object *json, bool use_json)
{
struct ospf6_route *route;
+ json_object *json_array_routes = NULL;
route = ospf6_route_head(table);
+ if (use_json)
+ json_array_routes = json_object_new_array();
while (route) {
if (route->path.type == type) {
if (detail)
- ospf6_route_show_detail(vty, route);
+ ospf6_route_show_detail(vty, route,
+ json_array_routes,
+ use_json);
else
- ospf6_route_show(vty, route);
+ ospf6_route_show(vty, route, json_array_routes,
+ use_json);
}
route = ospf6_route_next(route);
}
+ if (use_json)
+ json_object_object_add(json, "routes", json_array_routes);
}
static void ospf6_route_show_table(struct vty *vty, int detail,
- struct ospf6_route_table *table)
+ struct ospf6_route_table *table,
+ json_object *json, bool use_json)
{
struct ospf6_route *route;
+ json_object *json_array_routes = NULL;
route = ospf6_route_head(table);
+ if (use_json)
+ json_array_routes = json_object_new_array();
while (route) {
if (detail)
- ospf6_route_show_detail(vty, route);
+ ospf6_route_show_detail(vty, route, json_array_routes,
+ use_json);
else
- ospf6_route_show(vty, route);
+ ospf6_route_show(vty, route, json_array_routes,
+ use_json);
route = ospf6_route_next(route);
}
+ if (use_json)
+ json_object_object_add(json, "routes", json_array_routes);
}
int ospf6_route_table_show(struct vty *vty, int argc_start, int argc,
struct cmd_token **argv,
- struct ospf6_route_table *table)
+ struct ospf6_route_table *table, bool use_json)
{
int summary = 0;
int match = 0;
@@ -1317,10 +1511,15 @@ int ospf6_route_table_show(struct vty *vty, int argc_start, int argc,
int i, ret;
struct prefix prefix;
uint8_t type = 0;
+ int arg_end = use_json ? (argc - 1) : argc;
+ json_object *json = NULL;
memset(&prefix, 0, sizeof(struct prefix));
- for (i = argc_start; i < argc; i++) {
+ if (use_json)
+ json = json_object_new_object();
+
+ for (i = argc_start; i < arg_end; i++) {
if (strmatch(argv[i]->text, "summary")) {
summary++;
continue;
@@ -1363,14 +1562,24 @@ int ospf6_route_table_show(struct vty *vty, int argc_start, int argc,
slash++;
continue;
}
+ if (use_json)
+ json_object_string_add(json, "malformedArgument",
+ argv[i]->arg);
+ else
+ vty_out(vty, "Malformed argument: %s\n", argv[i]->arg);
- vty_out(vty, "Malformed argument: %s\n", argv[i]->arg);
return CMD_SUCCESS;
}
/* Give summary of this route table */
if (summary) {
- ospf6_route_show_table_summary(vty, table);
+ ospf6_route_show_table_summary(vty, table, json, use_json);
+ if (use_json) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
return CMD_SUCCESS;
}
@@ -1378,20 +1587,36 @@ int ospf6_route_table_show(struct vty *vty, int argc_start, int argc,
if (isprefix && !match) {
/* If exact address, give best matching route */
if (!slash)
- ospf6_route_show_table_address(vty, &prefix, table);
+ ospf6_route_show_table_address(vty, &prefix, table,
+ json, use_json);
else
- ospf6_route_show_table_prefix(vty, &prefix, table);
-
+ ospf6_route_show_table_prefix(vty, &prefix, table, json,
+ use_json);
+
+ if (use_json) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
return CMD_SUCCESS;
}
if (match)
- ospf6_route_show_table_match(vty, detail, &prefix, table);
+ ospf6_route_show_table_match(vty, detail, &prefix, table, json,
+ use_json);
else if (type)
- ospf6_route_show_table_type(vty, detail, type, table);
+ ospf6_route_show_table_type(vty, detail, type, table, json,
+ use_json);
else
- ospf6_route_show_table(vty, detail, table);
+ ospf6_route_show_table(vty, detail, table, json, use_json);
+ if (use_json) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
return CMD_SUCCESS;
}
@@ -1439,7 +1664,7 @@ static void ospf6_linkstate_show_table_exact(struct vty *vty,
ospf6_route_lock(route);
while (route && ospf6_route_is_prefix(prefix, route)) {
/* Specifying a prefix will always display details */
- ospf6_route_show_detail(vty, route);
+ ospf6_route_show_detail(vty, route, NULL, false);
route = ospf6_route_next(route);
}
if (route)
@@ -1457,7 +1682,7 @@ static void ospf6_linkstate_show_table(struct vty *vty, int detail,
route = ospf6_route_head(table);
while (route) {
if (detail)
- ospf6_route_show_detail(vty, route);
+ ospf6_route_show_detail(vty, route, NULL, false);
else
ospf6_linkstate_show(vty, route);
route = ospf6_route_next(route);
diff --git a/ospf6d/ospf6_route.h b/ospf6d/ospf6_route.h
index e2118003d5..a791a82cd4 100644
--- a/ospf6d/ospf6_route.h
+++ b/ospf6d/ospf6_route.h
@@ -23,6 +23,7 @@
#include "command.h"
#include "zclient.h"
+#include "lib/json.h"
#define OSPF6_MULTI_PATH_LIMIT 4
@@ -233,6 +234,9 @@ extern const char *const ospf6_path_type_substr[OSPF6_PATH_TYPE_MAX];
#define OSPF6_PATH_TYPE_SUBSTR(x) \
(0 < (x) && (x) < OSPF6_PATH_TYPE_MAX ? ospf6_path_type_substr[(x)] \
: ospf6_path_type_substr[0])
+#define OSPF6_PATH_TYPE_JSON(x) \
+ (0 < (x) && (x) < OSPF6_PATH_TYPE_MAX ? ospf6_path_type_json[(x)] \
+ : ospf6_path_type_json[0])
#define OSPF6_ROUTE_ADDRESS_STR "Display the route bestmatches the address\n"
#define OSPF6_ROUTE_PREFIX_STR "Display the route\n"
@@ -326,11 +330,14 @@ extern void ospf6_route_table_delete(struct ospf6_route_table *table);
extern void ospf6_route_dump(struct ospf6_route_table *table);
-extern void ospf6_route_show(struct vty *vty, struct ospf6_route *route);
-extern void ospf6_route_show_detail(struct vty *vty, struct ospf6_route *route);
+extern void ospf6_route_show(struct vty *vty, struct ospf6_route *route,
+ json_object *json, bool use_json);
+extern void ospf6_route_show_detail(struct vty *vty, struct ospf6_route *route,
+ json_object *json, bool use_json);
+
extern int ospf6_route_table_show(struct vty *, int, int, struct cmd_token **,
- struct ospf6_route_table *);
+ struct ospf6_route_table *, bool use_json);
extern int ospf6_linkstate_table_show(struct vty *vty, int idx_ipv4, int argc,
struct cmd_token **argv,
struct ospf6_route_table *table);
diff --git a/ospf6d/ospf6_spf.c b/ospf6d/ospf6_spf.c
index 70771c6060..f94252991c 100644
--- a/ospf6d/ospf6_spf.c
+++ b/ospf6d/ospf6_spf.c
@@ -432,9 +432,8 @@ void ospf6_spf_table_finish(struct ospf6_route_table *result_table)
}
}
-static const char *const ospf6_spf_reason_str[] = {
- "R+", "R-", "N+", "N-", "L+", "L-", "R*", "N*",
-};
+static const char *const ospf6_spf_reason_str[] = {"R+", "R-", "N+", "N-", "L+",
+ "L-", "R*", "N*", "C"};
void ospf6_spf_reason_string(unsigned int reason, char *buf, int size)
{
@@ -655,7 +654,7 @@ static int ospf6_spf_calculation_thread(struct thread *t)
(long long)runtime.tv_usec);
zlog_info(
- "SPF processing: # Areas: %d, SPF runtime: %lld sec %lld usec, Reason: %s\n",
+ "SPF processing: # Areas: %d, SPF runtime: %lld sec %lld usec, Reason: %s",
areas_processed, (long long)runtime.tv_sec,
(long long)runtime.tv_usec, rbuf);
diff --git a/ospf6d/ospf6_spf.h b/ospf6d/ospf6_spf.h
index 853ce4de07..253888d8ce 100644
--- a/ospf6d/ospf6_spf.h
+++ b/ospf6d/ospf6_spf.h
@@ -88,6 +88,7 @@ struct ospf6_vertex {
#define OSPF6_SPF_FLAGS_LINK_LSA_REMOVED (1 << 5)
#define OSPF6_SPF_FLAGS_ROUTER_LSA_ORIGINATED (1 << 6)
#define OSPF6_SPF_FLAGS_NETWORK_LSA_ORIGINATED (1 << 7)
+#define OSPF6_SPF_FLAGS_CONFIG_CHANGE (1 << 8)
static inline void ospf6_set_spf_reason(struct ospf6 *ospf, unsigned int reason)
{
diff --git a/ospf6d/ospf6_top.c b/ospf6d/ospf6_top.c
index 7b4ed84d53..3f72ec828e 100644
--- a/ospf6d/ospf6_top.c
+++ b/ospf6d/ospf6_top.c
@@ -51,6 +51,7 @@
#include "ospf6_intra.h"
#include "ospf6_spf.h"
#include "ospf6d.h"
+#include "lib/json.h"
DEFINE_QOBJ_TYPE(ospf6)
@@ -267,6 +268,8 @@ static struct ospf6 *ospf6_create(const char *name)
o->distance_table = route_table_init();
o->fd = -1;
+ o->max_multipath = MULTIPATH_NUM;
+
QOBJ_REG(o, ospf6);
/* Make ospf protocol socket. */
@@ -718,39 +721,6 @@ DEFUN (no_ospf6_distance_ospf6,
return CMD_SUCCESS;
}
-#if 0
-DEFUN (ospf6_distance_source,
- ospf6_distance_source_cmd,
- "distance (1-255) X:X::X:X/M [WORD]",
- "Administrative distance\n"
- "Distance value\n"
- "IP source prefix\n"
- "Access list name\n")
-{
- VTY_DECLVAR_CONTEXT(ospf6, o);
- char *alname = (argc == 4) ? argv[3]->arg : NULL;
- ospf6_distance_set (vty, o, argv[1]->arg, argv[2]->arg, alname);
-
- return CMD_SUCCESS;
-}
-
-DEFUN (no_ospf6_distance_source,
- no_ospf6_distance_source_cmd,
- "no distance (1-255) X:X::X:X/M [WORD]",
- NO_STR
- "Administrative distance\n"
- "Distance value\n"
- "IP source prefix\n"
- "Access list name\n")
-{
- VTY_DECLVAR_CONTEXT(ospf6, o);
- char *alname = (argc == 5) ? argv[4]->arg : NULL;
- ospf6_distance_unset (vty, o, argv[2]->arg, argv[3]->arg, alname);
-
- return CMD_SUCCESS;
-}
-#endif
-
DEFUN (ospf6_interface_area,
ospf6_interface_area_cmd,
"interface IFNAME area <A.B.C.D|(0-4294967295)>",
@@ -911,54 +881,61 @@ DEFUN (no_ospf6_stub_router_admin,
return CMD_SUCCESS;
}
-#if 0
-DEFUN (ospf6_stub_router_startup,
- ospf6_stub_router_startup_cmd,
- "stub-router on-startup (5-86400)",
- "Make router a stub router\n"
- "Advertise inability to be a transit router\n"
- "Automatically advertise as stub-router on startup of OSPF6\n"
- "Time (seconds) to advertise self as stub-router\n")
+/* Restart OSPF SPF algorithm*/
+static void ospf6_restart_spf(struct ospf6 *ospf6)
{
- return CMD_SUCCESS;
+ ospf6_route_remove_all(ospf6->route_table);
+ ospf6_route_remove_all(ospf6->brouter_table);
+ ospf6_route_remove_all(ospf6->external_table);
+
+ /* Trigger SPF */
+ ospf6_spf_schedule(ospf6, OSPF6_SPF_FLAGS_CONFIG_CHANGE);
}
-DEFUN (no_ospf6_stub_router_startup,
- no_ospf6_stub_router_startup_cmd,
- "no stub-router on-startup",
- NO_STR
- "Make router a stub router\n"
- "Advertise inability to be a transit router\n"
- "Automatically advertise as stub-router on startup of OSPF6\n"
- "Time (seconds) to advertise self as stub-router\n")
+/* Set the max paths */
+static void ospf6_maxpath_set(struct ospf6 *ospf6, uint16_t paths)
{
- return CMD_SUCCESS;
+ if (ospf6->max_multipath == paths)
+ return;
+
+ ospf6->max_multipath = paths;
+
+ /* Send deletion to zebra to delete all
+ * ospf specific routes and reinitiate
+ * SPF to reflect the new max multipath.
+ */
+ ospf6_restart_spf(ospf6);
}
-DEFUN (ospf6_stub_router_shutdown,
- ospf6_stub_router_shutdown_cmd,
- "stub-router on-shutdown (5-86400)",
- "Make router a stub router\n"
- "Advertise inability to be a transit router\n"
- "Automatically advertise as stub-router before shutdown\n"
- "Time (seconds) to advertise self as stub-router\n")
+/* Ospf Maximum-paths config support */
+DEFUN(ospf6_max_multipath,
+ ospf6_max_multipath_cmd,
+ "maximum-paths " CMD_RANGE_STR(1, MULTIPATH_NUM),
+ "Max no of multiple paths for ECMP support\n"
+ "Number of paths\n")
{
- return CMD_SUCCESS;
+ VTY_DECLVAR_CONTEXT(ospf6, ospf6);
+ int idx_number = 1;
+ int maximum_paths = strtol(argv[idx_number]->arg, NULL, 10);
+
+ ospf6_maxpath_set(ospf6, maximum_paths);
+
+ return CMD_SUCCESS;
}
-DEFUN (no_ospf6_stub_router_shutdown,
- no_ospf6_stub_router_shutdown_cmd,
- "no stub-router on-shutdown",
- NO_STR
- "Make router a stub router\n"
- "Advertise inability to be a transit router\n"
- "Automatically advertise as stub-router before shutdown\n"
- "Time (seconds) to advertise self as stub-router\n")
+DEFUN(no_ospf6_max_multipath,
+ no_ospf6_max_multipath_cmd,
+ "no maximum-paths [" CMD_RANGE_STR(1, MULTIPATH_NUM)"]",
+ NO_STR
+ "Max no of multiple paths for ECMP support\n"
+ "Number of paths\n")
{
- return CMD_SUCCESS;
-}
-#endif
+ VTY_DECLVAR_CONTEXT(ospf6, ospf6);
+ ospf6_maxpath_set(ospf6, MULTIPATH_NUM);
+
+ return CMD_SUCCESS;
+}
static void ospf6_show(struct vty *vty, struct ospf6 *o, json_object *json,
bool use_json)
@@ -998,6 +975,7 @@ static void ospf6_show(struct vty *vty, struct ospf6 *o, json_object *json,
json_object_int_add(json, "holdTimeMultiplier",
o->spf_hold_multiplier);
+ json_object_int_add(json, "maximumPaths", o->max_multipath);
if (o->ts_spf.tv_sec || o->ts_spf.tv_usec) {
timersub(&now, &o->ts_spf, &result);
@@ -1080,6 +1058,7 @@ static void ospf6_show(struct vty *vty, struct ospf6 *o, json_object *json,
vty_out(vty, " LSA minimum arrival %d msecs\n",
o->lsa_minarrival);
+ vty_out(vty, " Maximum-paths %u\n", o->max_multipath);
/* Show SPF parameters */
vty_out(vty,
@@ -1165,7 +1144,7 @@ DEFUN(show_ipv6_ospf6,
DEFUN (show_ipv6_ospf6_route,
show_ipv6_ospf6_route_cmd,
- "show ipv6 ospf6 route [<intra-area|inter-area|external-1|external-2|X:X::X:X|X:X::X:X/M|detail|summary>]",
+ "show ipv6 ospf6 route [<intra-area|inter-area|external-1|external-2|X:X::X:X|X:X::X:X/M|detail|summary>] [json]",
SHOW_STR
IP6_STR
OSPF6_STR
@@ -1177,41 +1156,44 @@ DEFUN (show_ipv6_ospf6_route,
"Specify IPv6 address\n"
"Specify IPv6 prefix\n"
"Detailed information\n"
- "Summary of route table\n")
+ "Summary of route table\n"
+ JSON_STR)
{
struct ospf6 *ospf6;
+ bool uj = use_json(argc, argv);
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
OSPF6_CMD_CHECK_RUNNING(ospf6);
- ospf6_route_table_show(vty, 4, argc, argv, ospf6->route_table);
+ ospf6_route_table_show(vty, 4, argc, argv, ospf6->route_table, uj);
return CMD_SUCCESS;
}
DEFUN (show_ipv6_ospf6_route_match,
show_ipv6_ospf6_route_match_cmd,
- "show ipv6 ospf6 route X:X::X:X/M <match|longer>",
+ "show ipv6 ospf6 route X:X::X:X/M <match|longer> [json]",
SHOW_STR
IP6_STR
OSPF6_STR
ROUTE_STR
"Specify IPv6 prefix\n"
"Display routes which match the specified route\n"
- "Display routes longer than the specified route\n")
+ "Display routes longer than the specified route\n"
+ JSON_STR)
{
struct ospf6 *ospf6;
+ bool uj = use_json(argc, argv);
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
OSPF6_CMD_CHECK_RUNNING(ospf6);
- ospf6_route_table_show(vty, 4, argc, argv, ospf6->route_table);
-
+ ospf6_route_table_show(vty, 4, argc, argv, ospf6->route_table, uj);
return CMD_SUCCESS;
}
DEFUN (show_ipv6_ospf6_route_match_detail,
show_ipv6_ospf6_route_match_detail_cmd,
- "show ipv6 ospf6 route X:X::X:X/M match detail",
+ "show ipv6 ospf6 route X:X::X:X/M match detail [json]",
SHOW_STR
IP6_STR
OSPF6_STR
@@ -1219,21 +1201,22 @@ DEFUN (show_ipv6_ospf6_route_match_detail,
"Specify IPv6 prefix\n"
"Display routes which match the specified route\n"
"Detailed information\n"
- )
+ JSON_STR)
{
struct ospf6 *ospf6;
+ bool uj = use_json(argc, argv);
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
OSPF6_CMD_CHECK_RUNNING(ospf6);
- ospf6_route_table_show(vty, 4, argc, argv, ospf6->route_table);
+ ospf6_route_table_show(vty, 4, argc, argv, ospf6->route_table, uj);
return CMD_SUCCESS;
}
DEFUN (show_ipv6_ospf6_route_type_detail,
show_ipv6_ospf6_route_type_detail_cmd,
- "show ipv6 ospf6 route <intra-area|inter-area|external-1|external-2> detail",
+ "show ipv6 ospf6 route <intra-area|inter-area|external-1|external-2> detail [json]",
SHOW_STR
IP6_STR
OSPF6_STR
@@ -1243,14 +1226,15 @@ DEFUN (show_ipv6_ospf6_route_type_detail,
"Display Type-1 External routes\n"
"Display Type-2 External routes\n"
"Detailed information\n"
- )
+ JSON_STR)
{
struct ospf6 *ospf6;
+ bool uj = use_json(argc, argv);
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
OSPF6_CMD_CHECK_RUNNING(ospf6);
- ospf6_route_table_show(vty, 4, argc, argv, ospf6->route_table);
+ ospf6_route_table_show(vty, 4, argc, argv, ospf6->route_table, uj);
return CMD_SUCCESS;
}
@@ -1333,6 +1317,11 @@ static int config_write_ospf6(struct vty *vty)
vty_out(vty, " timers lsa min-arrival %d\n",
ospf6->lsa_minarrival);
+ /* ECMP max path config */
+ if (ospf6->max_multipath != MULTIPATH_NUM)
+ vty_out(vty, " maximum-paths %d\n",
+ ospf6->max_multipath);
+
ospf6_stub_router_config_write(vty, ospf6);
ospf6_redistribute_config_write(vty, ospf6);
ospf6_area_config_write(vty, ospf6);
@@ -1390,20 +1379,13 @@ void ospf6_top_init(void)
install_element(OSPF6_NODE, &no_ospf6_interface_area_cmd);
install_element(OSPF6_NODE, &ospf6_stub_router_admin_cmd);
install_element(OSPF6_NODE, &no_ospf6_stub_router_admin_cmd);
-/* For a later time */
-#if 0
- install_element (OSPF6_NODE, &ospf6_stub_router_startup_cmd);
- install_element (OSPF6_NODE, &no_ospf6_stub_router_startup_cmd);
- install_element (OSPF6_NODE, &ospf6_stub_router_shutdown_cmd);
- install_element (OSPF6_NODE, &no_ospf6_stub_router_shutdown_cmd);
-#endif
+
+ /* maximum-paths command */
+ install_element(OSPF6_NODE, &ospf6_max_multipath_cmd);
+ install_element(OSPF6_NODE, &no_ospf6_max_multipath_cmd);
install_element(OSPF6_NODE, &ospf6_distance_cmd);
install_element(OSPF6_NODE, &no_ospf6_distance_cmd);
install_element(OSPF6_NODE, &ospf6_distance_ospf6_cmd);
install_element(OSPF6_NODE, &no_ospf6_distance_ospf6_cmd);
-#if 0
- install_element (OSPF6_NODE, &ospf6_distance_source_cmd);
- install_element (OSPF6_NODE, &no_ospf6_distance_source_cmd);
-#endif
}
diff --git a/ospf6d/ospf6_top.h b/ospf6d/ospf6_top.h
index 93e25d7599..75dff86cd7 100644
--- a/ospf6d/ospf6_top.h
+++ b/ospf6d/ospf6_top.h
@@ -127,6 +127,11 @@ struct ospf6 {
* update to neighbors immediatly */
uint8_t inst_shutdown;
+ /* Max number of multiple paths
+ * to support ECMP.
+ */
+ uint16_t max_multipath;
+
QOBJ_FIELDS
};
DECLARE_QOBJ_TYPE(ospf6)
diff --git a/ospf6d/ospf6_zebra.c b/ospf6d/ospf6_zebra.c
index 7a8027a37f..2b7072d34f 100644
--- a/ospf6d/ospf6_zebra.c
+++ b/ospf6d/ospf6_zebra.c
@@ -343,7 +343,14 @@ static void ospf6_zebra_route_update(int type, struct ospf6_route *request,
api.safi = SAFI_UNICAST;
api.prefix = *dest;
SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
- api.nexthop_num = MIN(nhcount, MULTIPATH_NUM);
+
+ if (nhcount > ospf6->max_multipath) {
+ if (IS_OSPF6_DEBUG_ZEBRA(SEND))
+ zlog_debug(
+ " Nexthop count is greater than configured maximum-path, hence ignore the extra nexthops");
+ }
+ api.nexthop_num = MIN(nhcount, ospf6->max_multipath);
+
ospf6_route_zebra_copy_nexthops(request, api.nexthops, api.nexthop_num,
api.vrf_id);
SET_FLAG(api.message, ZAPI_MESSAGE_METRIC);
diff --git a/ospf6d/ospf6d.c b/ospf6d/ospf6d.c
index 4b958e550f..8d9c85fd08 100644
--- a/ospf6d/ospf6d.c
+++ b/ospf6d/ospf6d.c
@@ -44,6 +44,7 @@
#include "ospf6_flood.h"
#include "ospf6d.h"
#include "ospf6_bfd.h"
+#include "lib/json.h"
struct route_node *route_prev(struct route_node *node)
{
@@ -154,53 +155,264 @@ static uint16_t parse_type_spec(int idx_lsa, int argc, struct cmd_token **argv)
return type;
}
+void ospf6_lsdb_show(struct vty *vty, enum ospf_lsdb_show_level level,
+ uint16_t *type, uint32_t *id, uint32_t *adv_router,
+ struct ospf6_lsdb *lsdb, json_object *json_obj,
+ bool use_json)
+{
+ struct ospf6_lsa *lsa;
+ const struct route_node *end = NULL;
+ void (*showfunc)(struct vty *, struct ospf6_lsa *, json_object *,
+ bool) = NULL;
+ json_object *json_array = NULL;
+
+ switch (level) {
+ case OSPF6_LSDB_SHOW_LEVEL_DETAIL:
+ showfunc = ospf6_lsa_show;
+ break;
+ case OSPF6_LSDB_SHOW_LEVEL_INTERNAL:
+ showfunc = ospf6_lsa_show_internal;
+ break;
+ case OSPF6_LSDB_SHOW_LEVEL_DUMP:
+ showfunc = ospf6_lsa_show_dump;
+ break;
+ case OSPF6_LSDB_SHOW_LEVEL_NORMAL:
+ default:
+ showfunc = ospf6_lsa_show_summary;
+ }
+
+ if (use_json)
+ json_array = json_object_new_array();
+
+ if (type && id && adv_router) {
+ lsa = ospf6_lsdb_lookup(*type, *id, *adv_router, lsdb);
+ if (lsa) {
+ if (level == OSPF6_LSDB_SHOW_LEVEL_NORMAL)
+ ospf6_lsa_show(vty, lsa, json_array, use_json);
+ else
+ (*showfunc)(vty, lsa, json_array, use_json);
+ }
+
+ if (use_json)
+ json_object_object_add(json_obj, "lsa", json_array);
+ return;
+ }
+
+ if ((level == OSPF6_LSDB_SHOW_LEVEL_NORMAL) && !use_json)
+ ospf6_lsa_show_summary_header(vty);
+
+ end = ospf6_lsdb_head(lsdb, !!type + !!(type && adv_router),
+ type ? *type : 0, adv_router ? *adv_router : 0,
+ &lsa);
+ while (lsa) {
+ if ((!adv_router || lsa->header->adv_router == *adv_router)
+ && (!id || lsa->header->id == *id))
+ (*showfunc)(vty, lsa, json_array, use_json);
+ lsa = ospf6_lsdb_next(end, lsa);
+ }
+
+ if (use_json)
+ json_object_object_add(json_obj, "lsa", json_array);
+}
+
+static void ospf6_lsdb_show_wrapper(struct vty *vty,
+ enum ospf_lsdb_show_level level,
+ uint16_t *type, uint32_t *id,
+ uint32_t *adv_router, bool uj,
+ struct ospf6 *ospf6)
+{
+ struct listnode *i, *j;
+ struct ospf6 *o = ospf6;
+ struct ospf6_area *oa;
+ struct ospf6_interface *oi;
+ json_object *json = NULL;
+ json_object *json_array = NULL;
+ json_object *json_obj = NULL;
+
+ if (uj) {
+ json = json_object_new_object();
+ json_array = json_object_new_array();
+ }
+ for (ALL_LIST_ELEMENTS_RO(o->area_list, i, oa)) {
+ if (uj) {
+ json_obj = json_object_new_object();
+ json_object_string_add(json_obj, "areaId", oa->name);
+ } else
+ vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name);
+ ospf6_lsdb_show(vty, level, type, id, adv_router, oa->lsdb,
+ json_obj, uj);
+ if (uj)
+ json_object_array_add(json_array, json_obj);
+ }
+ if (uj)
+ json_object_object_add(json, "areaScopedLinkStateDb",
+ json_array);
+
+ if (uj)
+ json_array = json_object_new_array();
+ for (ALL_LIST_ELEMENTS_RO(o->area_list, i, oa)) {
+ for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) {
+ if (uj) {
+ json_obj = json_object_new_object();
+ json_object_string_add(json_obj, "areaId",
+ oa->name);
+ json_object_string_add(json_obj, "interface",
+ oi->interface->name);
+ } else
+ vty_out(vty, IF_LSDB_TITLE_FORMAT,
+ oi->interface->name, oa->name);
+ ospf6_lsdb_show(vty, level, type, id, adv_router,
+ oi->lsdb, json_obj, uj);
+ if (uj)
+ json_object_array_add(json_array, json_obj);
+ }
+ }
+ if (uj)
+ json_object_object_add(json, "interfaceScopedLinkStateDb",
+ json_array);
+ if (uj) {
+ json_array = json_object_new_array();
+ json_obj = json_object_new_object();
+ } else
+ vty_out(vty, AS_LSDB_TITLE_FORMAT);
+
+ ospf6_lsdb_show(vty, level, type, id, adv_router, o->lsdb, json_obj,
+ uj);
+
+ if (uj) {
+ json_object_array_add(json_array, json_obj);
+ json_object_object_add(json, "asScopedLinkStateDb", json_array);
+
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ } else
+ vty_out(vty, "\n");
+}
+
+static void ospf6_lsdb_type_show_wrapper(struct vty *vty,
+ enum ospf_lsdb_show_level level,
+ uint16_t *type, uint32_t *id,
+ uint32_t *adv_router, bool uj,
+ struct ospf6 *ospf6)
+{
+ struct listnode *i, *j;
+ struct ospf6 *o = ospf6;
+ struct ospf6_area *oa;
+ struct ospf6_interface *oi;
+ json_object *json = NULL;
+ json_object *json_array = NULL;
+ json_object *json_obj = NULL;
+
+ if (uj) {
+ json = json_object_new_object();
+ json_array = json_object_new_array();
+ }
+
+ switch (OSPF6_LSA_SCOPE(*type)) {
+ case OSPF6_SCOPE_AREA:
+ for (ALL_LIST_ELEMENTS_RO(o->area_list, i, oa)) {
+ if (uj) {
+ json_obj = json_object_new_object();
+ json_object_string_add(json_obj, "areaId",
+ oa->name);
+ } else
+ vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name);
+
+ ospf6_lsdb_show(vty, level, type, id, adv_router,
+ oa->lsdb, json_obj, uj);
+ if (uj)
+ json_object_array_add(json_array, json_obj);
+ }
+ if (uj)
+ json_object_object_add(json, "areaScopedLinkStateDb",
+ json_array);
+ break;
+
+ case OSPF6_SCOPE_LINKLOCAL:
+ for (ALL_LIST_ELEMENTS_RO(o->area_list, i, oa)) {
+ for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) {
+ if (uj) {
+ json_obj = json_object_new_object();
+ json_object_string_add(
+ json_obj, "areaId", oa->name);
+ json_object_string_add(
+ json_obj, "interface",
+ oi->interface->name);
+ } else
+ vty_out(vty, IF_LSDB_TITLE_FORMAT,
+ oi->interface->name, oa->name);
+
+ ospf6_lsdb_show(vty, level, type, id,
+ adv_router, oi->lsdb, json_obj,
+ uj);
+
+ if (uj)
+ json_object_array_add(json_array,
+ json_obj);
+ }
+ }
+ if (uj)
+ json_object_object_add(
+ json, "interfaceScopedLinkStateDb", json_array);
+ break;
+
+ case OSPF6_SCOPE_AS:
+ if (uj)
+ json_obj = json_object_new_object();
+ else
+ vty_out(vty, AS_LSDB_TITLE_FORMAT);
+
+ ospf6_lsdb_show(vty, level, type, id, adv_router, o->lsdb,
+ json_obj, uj);
+ if (uj) {
+ json_object_array_add(json_array, json_obj);
+ json_object_object_add(json, "asScopedLinkStateDb",
+ json_array);
+ }
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+ if (uj) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ } else
+ vty_out(vty, "\n");
+}
+
DEFUN (show_ipv6_ospf6_database,
show_ipv6_ospf6_database_cmd,
- "show ipv6 ospf6 database [<detail|dump|internal>]",
+ "show ipv6 ospf6 database [<detail|dump|internal>] [json]",
SHOW_STR
IPV6_STR
OSPF6_STR
"Display Link state database\n"
"Display details of LSAs\n"
"Dump LSAs\n"
- "Display LSA's internal information\n")
+ "Display LSA's internal information\n"
+ JSON_STR)
{
int idx_level = 4;
int level;
- struct listnode *i, *j;
+ bool uj = use_json(argc, argv);
struct ospf6 *ospf6;
- struct ospf6_area *oa;
- struct ospf6_interface *oi;
-
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
OSPF6_CMD_CHECK_RUNNING(ospf6);
level = parse_show_level(idx_level, argc, argv);
-
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name);
- ospf6_lsdb_show(vty, level, NULL, NULL, NULL, oa->lsdb);
- }
-
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) {
- vty_out(vty, IF_LSDB_TITLE_FORMAT, oi->interface->name,
- oa->name);
- ospf6_lsdb_show(vty, level, NULL, NULL, NULL, oi->lsdb);
- }
- }
-
- vty_out(vty, AS_LSDB_TITLE_FORMAT);
- ospf6_lsdb_show(vty, level, NULL, NULL, NULL, ospf6->lsdb);
-
- vty_out(vty, "\n");
+ ospf6_lsdb_show_wrapper(vty, level, NULL, NULL, NULL, uj, ospf6);
return CMD_SUCCESS;
}
-DEFUN (show_ipv6_ospf6_database_type,
- show_ipv6_ospf6_database_type_cmd,
- "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> [<detail|dump|internal>]",
+DEFUN (show_ipv6_ospf6_database_type, show_ipv6_ospf6_database_type_cmd,
+ "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> [<detail|dump|internal>] [json]",
SHOW_STR
IPV6_STR
OSPF6_STR
@@ -217,16 +429,14 @@ DEFUN (show_ipv6_ospf6_database_type,
"Display details of LSAs\n"
"Dump LSAs\n"
"Display LSA's internal information\n"
- )
+ JSON_STR)
{
int idx_lsa = 4;
int idx_level = 5;
int level;
- struct listnode *i, *j;
- struct ospf6 *ospf6;
- struct ospf6_area *oa;
- struct ospf6_interface *oi;
uint16_t type = 0;
+ bool uj = use_json(argc, argv);
+ struct ospf6 *ospf6;
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
@@ -235,43 +445,13 @@ DEFUN (show_ipv6_ospf6_database_type,
type = parse_type_spec(idx_lsa, argc, argv);
level = parse_show_level(idx_level, argc, argv);
- switch (OSPF6_LSA_SCOPE(type)) {
- case OSPF6_SCOPE_AREA:
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name);
- ospf6_lsdb_show(vty, level, &type, NULL, NULL,
- oa->lsdb);
- }
- break;
-
- case OSPF6_SCOPE_LINKLOCAL:
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) {
- vty_out(vty, IF_LSDB_TITLE_FORMAT,
- oi->interface->name, oa->name);
- ospf6_lsdb_show(vty, level, &type, NULL, NULL,
- oi->lsdb);
- }
- }
- break;
-
- case OSPF6_SCOPE_AS:
- vty_out(vty, AS_LSDB_TITLE_FORMAT);
- ospf6_lsdb_show(vty, level, &type, NULL, NULL, ospf6->lsdb);
- break;
-
- default:
- assert(0);
- break;
- }
-
- vty_out(vty, "\n");
+ ospf6_lsdb_type_show_wrapper(vty, level, &type, NULL, NULL, uj, ospf6);
return CMD_SUCCESS;
}
DEFUN (show_ipv6_ospf6_database_id,
show_ipv6_ospf6_database_id_cmd,
- "show ipv6 ospf6 database <*|linkstate-id> A.B.C.D [<detail|dump|internal>]",
+ "show ipv6 ospf6 database <*|linkstate-id> A.B.C.D [<detail|dump|internal>] [json]",
SHOW_STR
IPV6_STR
OSPF6_STR
@@ -281,16 +461,15 @@ DEFUN (show_ipv6_ospf6_database_id,
"Specify Link state ID as IPv4 address notation\n"
"Display details of LSAs\n"
"Dump LSAs\n"
- "Display LSA's internal information\n")
+ "Display LSA's internal information\n"
+ JSON_STR)
{
int idx_ipv4 = 5;
int idx_level = 6;
int level;
- struct listnode *i, *j;
- struct ospf6 *ospf6;
- struct ospf6_area *oa;
- struct ospf6_interface *oi;
uint32_t id = 0;
+ bool uj = use_json(argc, argv);
+ struct ospf6 *ospf6;
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
@@ -300,30 +479,14 @@ DEFUN (show_ipv6_ospf6_database_id,
inet_pton(AF_INET, argv[idx_ipv4]->arg, &id);
level = parse_show_level(idx_level, argc, argv);
+ ospf6_lsdb_show_wrapper(vty, level, NULL, &id, NULL, uj, ospf6);
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name);
- ospf6_lsdb_show(vty, level, NULL, &id, NULL, oa->lsdb);
- }
-
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) {
- vty_out(vty, IF_LSDB_TITLE_FORMAT, oi->interface->name,
- oa->name);
- ospf6_lsdb_show(vty, level, NULL, &id, NULL, oi->lsdb);
- }
- }
-
- vty_out(vty, AS_LSDB_TITLE_FORMAT);
- ospf6_lsdb_show(vty, level, NULL, &id, NULL, ospf6->lsdb);
-
- vty_out(vty, "\n");
return CMD_SUCCESS;
}
DEFUN (show_ipv6_ospf6_database_router,
show_ipv6_ospf6_database_router_cmd,
- "show ipv6 ospf6 database <*|adv-router> * A.B.C.D <detail|dump|internal>",
+ "show ipv6 ospf6 database <*|adv-router> * A.B.C.D <detail|dump|internal> [json]",
SHOW_STR
IPV6_STR
OSPF6_STR
@@ -334,16 +497,15 @@ DEFUN (show_ipv6_ospf6_database_router,
"Specify Advertising Router as IPv4 address notation\n"
"Display details of LSAs\n"
"Dump LSAs\n"
- "Display LSA's internal information\n")
+ "Display LSA's internal information\n"
+ JSON_STR)
{
int idx_ipv4 = 6;
int idx_level = 7;
int level;
- struct listnode *i, *j;
- struct ospf6 *ospf6;
- struct ospf6_area *oa;
- struct ospf6_interface *oi;
uint32_t adv_router = 0;
+ bool uj = use_json(argc, argv);
+ struct ospf6 *ospf6;
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
@@ -351,24 +513,7 @@ DEFUN (show_ipv6_ospf6_database_router,
inet_pton(AF_INET, argv[idx_ipv4]->arg, &adv_router);
level = parse_show_level(idx_level, argc, argv);
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name);
- ospf6_lsdb_show(vty, level, NULL, NULL, &adv_router, oa->lsdb);
- }
-
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) {
- vty_out(vty, IF_LSDB_TITLE_FORMAT, oi->interface->name,
- oa->name);
- ospf6_lsdb_show(vty, level, NULL, NULL, &adv_router,
- oi->lsdb);
- }
- }
-
- vty_out(vty, AS_LSDB_TITLE_FORMAT);
- ospf6_lsdb_show(vty, level, NULL, NULL, &adv_router, ospf6->lsdb);
-
- vty_out(vty, "\n");
+ ospf6_lsdb_show_wrapper(vty, level, NULL, NULL, &adv_router, uj, ospf6);
return CMD_SUCCESS;
}
@@ -408,7 +553,7 @@ DEFUN_HIDDEN (show_ipv6_ospf6_database_aggr_router,
return CMD_SUCCESS;
}
ospf6_lsdb_show(vty, level, &type, NULL, NULL,
- oa->temp_router_lsa_lsdb);
+ oa->temp_router_lsa_lsdb, NULL, false);
/* Remove the temp cache */
ospf6_remove_temp_router_lsa(oa);
}
@@ -420,7 +565,7 @@ DEFUN_HIDDEN (show_ipv6_ospf6_database_aggr_router,
DEFUN (show_ipv6_ospf6_database_type_id,
show_ipv6_ospf6_database_type_id_cmd,
- "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> linkstate-id A.B.C.D [<detail|dump|internal>]",
+ "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> linkstate-id A.B.C.D [<detail|dump|internal>] [json]",
SHOW_STR
IPV6_STR
OSPF6_STR
@@ -439,18 +584,16 @@ DEFUN (show_ipv6_ospf6_database_type_id,
"Display details of LSAs\n"
"Dump LSAs\n"
"Display LSA's internal information\n"
- )
+ JSON_STR)
{
int idx_lsa = 4;
int idx_ipv4 = 6;
int idx_level = 7;
int level;
- struct listnode *i, *j;
- struct ospf6 *ospf6;
- struct ospf6_area *oa;
- struct ospf6_interface *oi;
uint16_t type = 0;
uint32_t id = 0;
+ bool uj = use_json(argc, argv);
+ struct ospf6 *ospf6;
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
@@ -460,42 +603,13 @@ DEFUN (show_ipv6_ospf6_database_type_id,
inet_pton(AF_INET, argv[idx_ipv4]->arg, &id);
level = parse_show_level(idx_level, argc, argv);
- switch (OSPF6_LSA_SCOPE(type)) {
- case OSPF6_SCOPE_AREA:
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name);
- ospf6_lsdb_show(vty, level, &type, &id, NULL, oa->lsdb);
- }
- break;
-
- case OSPF6_SCOPE_LINKLOCAL:
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) {
- vty_out(vty, IF_LSDB_TITLE_FORMAT,
- oi->interface->name, oa->name);
- ospf6_lsdb_show(vty, level, &type, &id, NULL,
- oi->lsdb);
- }
- }
- break;
-
- case OSPF6_SCOPE_AS:
- vty_out(vty, AS_LSDB_TITLE_FORMAT);
- ospf6_lsdb_show(vty, level, &type, &id, NULL, ospf6->lsdb);
- break;
-
- default:
- assert(0);
- break;
- }
-
- vty_out(vty, "\n");
+ ospf6_lsdb_type_show_wrapper(vty, level, &type, &id, NULL, uj, ospf6);
return CMD_SUCCESS;
}
DEFUN (show_ipv6_ospf6_database_type_router,
show_ipv6_ospf6_database_type_router_cmd,
- "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> <*|adv-router> A.B.C.D [<detail|dump|internal>]",
+ "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> <*|adv-router> A.B.C.D [<detail|dump|internal>] [json]",
SHOW_STR
IPV6_STR
OSPF6_STR
@@ -515,18 +629,16 @@ DEFUN (show_ipv6_ospf6_database_type_router,
"Display details of LSAs\n"
"Dump LSAs\n"
"Display LSA's internal information\n"
- )
+ JSON_STR)
{
int idx_lsa = 4;
int idx_ipv4 = 6;
int idx_level = 7;
int level;
- struct listnode *i, *j;
- struct ospf6 *ospf6;
- struct ospf6_area *oa;
- struct ospf6_interface *oi;
uint16_t type = 0;
uint32_t adv_router = 0;
+ bool uj = use_json(argc, argv);
+ struct ospf6 *ospf6;
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
@@ -535,45 +647,15 @@ DEFUN (show_ipv6_ospf6_database_type_router,
inet_pton(AF_INET, argv[idx_ipv4]->arg, &adv_router);
level = parse_show_level(idx_level, argc, argv);
- switch (OSPF6_LSA_SCOPE(type)) {
- case OSPF6_SCOPE_AREA:
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name);
- ospf6_lsdb_show(vty, level, &type, NULL, &adv_router,
- oa->lsdb);
- }
- break;
-
- case OSPF6_SCOPE_LINKLOCAL:
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) {
- vty_out(vty, IF_LSDB_TITLE_FORMAT,
- oi->interface->name, oa->name);
- ospf6_lsdb_show(vty, level, &type, NULL,
- &adv_router, oi->lsdb);
- }
- }
- break;
-
- case OSPF6_SCOPE_AS:
- vty_out(vty, AS_LSDB_TITLE_FORMAT);
- ospf6_lsdb_show(vty, level, &type, NULL, &adv_router,
- ospf6->lsdb);
- break;
-
- default:
- assert(0);
- break;
- }
-
- vty_out(vty, "\n");
+ ospf6_lsdb_type_show_wrapper(vty, level, &type, NULL, &adv_router, uj,
+ ospf6);
return CMD_SUCCESS;
}
DEFUN (show_ipv6_ospf6_database_id_router,
show_ipv6_ospf6_database_id_router_cmd,
- "show ipv6 ospf6 database * A.B.C.D A.B.C.D [<detail|dump|internal>]",
+ "show ipv6 ospf6 database * A.B.C.D A.B.C.D [<detail|dump|internal>] [json]",
SHOW_STR
IPV6_STR
OSPF6_STR
@@ -584,18 +666,16 @@ DEFUN (show_ipv6_ospf6_database_id_router,
"Display details of LSAs\n"
"Dump LSAs\n"
"Display LSA's internal information\n"
- )
+ JSON_STR)
{
int idx_ls_id = 5;
int idx_adv_rtr = 6;
int idx_level = 7;
int level;
- struct listnode *i, *j;
- struct ospf6 *ospf6;
- struct ospf6_area *oa;
- struct ospf6_interface *oi;
uint32_t id = 0;
uint32_t adv_router = 0;
+ bool uj = use_json(argc, argv);
+ struct ospf6 *ospf6;
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
OSPF6_CMD_CHECK_RUNNING(ospf6);
@@ -603,31 +683,14 @@ DEFUN (show_ipv6_ospf6_database_id_router,
inet_pton(AF_INET, argv[idx_adv_rtr]->arg, &adv_router);
level = parse_show_level(idx_level, argc, argv);
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name);
- ospf6_lsdb_show(vty, level, NULL, &id, &adv_router, oa->lsdb);
- }
-
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) {
- vty_out(vty, IF_LSDB_TITLE_FORMAT, oi->interface->name,
- oa->name);
- ospf6_lsdb_show(vty, level, NULL, &id, &adv_router,
- oi->lsdb);
- }
- }
-
- vty_out(vty, AS_LSDB_TITLE_FORMAT);
- ospf6_lsdb_show(vty, level, NULL, &id, &adv_router, ospf6->lsdb);
-
- vty_out(vty, "\n");
+ ospf6_lsdb_show_wrapper(vty, level, NULL, &id, &adv_router, uj, ospf6);
return CMD_SUCCESS;
}
DEFUN (show_ipv6_ospf6_database_adv_router_linkstate_id,
show_ipv6_ospf6_database_adv_router_linkstate_id_cmd,
- "show ipv6 ospf6 database adv-router A.B.C.D linkstate-id A.B.C.D [<detail|dump|internal>]",
+ "show ipv6 ospf6 database adv-router A.B.C.D linkstate-id A.B.C.D [<detail|dump|internal>] [json]",
SHOW_STR
IPV6_STR
OSPF6_STR
@@ -638,18 +701,17 @@ DEFUN (show_ipv6_ospf6_database_adv_router_linkstate_id,
"Specify Link state ID as IPv4 address notation\n"
"Display details of LSAs\n"
"Dump LSAs\n"
- "Display LSA's internal information\n")
+ "Display LSA's internal information\n"
+ JSON_STR)
{
int idx_adv_rtr = 5;
int idx_ls_id = 7;
int idx_level = 8;
int level;
- struct listnode *i, *j;
- struct ospf6 *ospf6;
- struct ospf6_area *oa;
- struct ospf6_interface *oi;
uint32_t id = 0;
uint32_t adv_router = 0;
+ bool uj = use_json(argc, argv);
+ struct ospf6 *ospf6;
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
@@ -658,30 +720,13 @@ DEFUN (show_ipv6_ospf6_database_adv_router_linkstate_id,
inet_pton(AF_INET, argv[idx_ls_id]->arg, &id);
level = parse_show_level(idx_level, argc, argv);
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name);
- ospf6_lsdb_show(vty, level, NULL, &id, &adv_router, oa->lsdb);
- }
-
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) {
- vty_out(vty, IF_LSDB_TITLE_FORMAT, oi->interface->name,
- oa->name);
- ospf6_lsdb_show(vty, level, NULL, &id, &adv_router,
- oi->lsdb);
- }
- }
-
- vty_out(vty, AS_LSDB_TITLE_FORMAT);
- ospf6_lsdb_show(vty, level, NULL, &id, &adv_router, ospf6->lsdb);
-
- vty_out(vty, "\n");
+ ospf6_lsdb_show_wrapper(vty, level, NULL, &id, &adv_router, uj, ospf6);
return CMD_SUCCESS;
}
DEFUN (show_ipv6_ospf6_database_type_id_router,
show_ipv6_ospf6_database_type_id_router_cmd,
- "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> A.B.C.D A.B.C.D [<dump|internal>]",
+ "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> A.B.C.D A.B.C.D [<dump|internal>] [json]",
SHOW_STR
IPV6_STR
OSPF6_STR
@@ -698,20 +743,19 @@ DEFUN (show_ipv6_ospf6_database_type_id_router,
"Specify Link state ID as IPv4 address notation\n"
"Specify Advertising Router as IPv4 address notation\n"
"Dump LSAs\n"
- "Display LSA's internal information\n")
+ "Display LSA's internal information\n"
+ JSON_STR)
{
int idx_lsa = 4;
int idx_ls_id = 5;
int idx_adv_rtr = 6;
int idx_level = 7;
int level;
- struct listnode *i, *j;
- struct ospf6 *ospf6;
- struct ospf6_area *oa;
- struct ospf6_interface *oi;
uint16_t type = 0;
uint32_t id = 0;
uint32_t adv_router = 0;
+ bool uj = use_json(argc, argv);
+ struct ospf6 *ospf6;
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
@@ -722,45 +766,15 @@ DEFUN (show_ipv6_ospf6_database_type_id_router,
inet_pton(AF_INET, argv[idx_adv_rtr]->arg, &adv_router);
level = parse_show_level(idx_level, argc, argv);
- switch (OSPF6_LSA_SCOPE(type)) {
- case OSPF6_SCOPE_AREA:
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name);
- ospf6_lsdb_show(vty, level, &type, &id, &adv_router,
- oa->lsdb);
- }
- break;
-
- case OSPF6_SCOPE_LINKLOCAL:
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) {
- vty_out(vty, IF_LSDB_TITLE_FORMAT,
- oi->interface->name, oa->name);
- ospf6_lsdb_show(vty, level, &type, &id,
- &adv_router, oi->lsdb);
- }
- }
- break;
-
- case OSPF6_SCOPE_AS:
- vty_out(vty, AS_LSDB_TITLE_FORMAT);
- ospf6_lsdb_show(vty, level, &type, &id, &adv_router,
- ospf6->lsdb);
- break;
-
- default:
- assert(0);
- break;
- }
-
- vty_out(vty, "\n");
+ ospf6_lsdb_type_show_wrapper(vty, level, &type, &id, &adv_router, uj,
+ ospf6);
return CMD_SUCCESS;
}
DEFUN (show_ipv6_ospf6_database_type_adv_router_linkstate_id,
show_ipv6_ospf6_database_type_adv_router_linkstate_id_cmd,
- "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> adv-router A.B.C.D linkstate-id A.B.C.D [<dump|internal>]",
+ "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> adv-router A.B.C.D linkstate-id A.B.C.D [<dump|internal>] [json]",
SHOW_STR
IPV6_STR
OSPF6_STR
@@ -779,20 +793,19 @@ DEFUN (show_ipv6_ospf6_database_type_adv_router_linkstate_id,
"Search by Link state ID\n"
"Specify Link state ID as IPv4 address notation\n"
"Dump LSAs\n"
- "Display LSA's internal information\n")
+ "Display LSA's internal information\n"
+ JSON_STR)
{
int idx_lsa = 4;
int idx_adv_rtr = 6;
int idx_ls_id = 8;
int idx_level = 9;
int level;
- struct listnode *i, *j;
- struct ospf6 *ospf6;
- struct ospf6_area *oa;
- struct ospf6_interface *oi;
uint16_t type = 0;
uint32_t id = 0;
uint32_t adv_router = 0;
+ bool uj = use_json(argc, argv);
+ struct ospf6 *ospf6;
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
@@ -803,44 +816,14 @@ DEFUN (show_ipv6_ospf6_database_type_adv_router_linkstate_id,
inet_pton(AF_INET, argv[idx_ls_id]->arg, &id);
level = parse_show_level(idx_level, argc, argv);
- switch (OSPF6_LSA_SCOPE(type)) {
- case OSPF6_SCOPE_AREA:
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name);
- ospf6_lsdb_show(vty, level, &type, &id, &adv_router,
- oa->lsdb);
- }
- break;
-
- case OSPF6_SCOPE_LINKLOCAL:
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) {
- vty_out(vty, IF_LSDB_TITLE_FORMAT,
- oi->interface->name, oa->name);
- ospf6_lsdb_show(vty, level, &type, &id,
- &adv_router, oi->lsdb);
- }
- }
- break;
-
- case OSPF6_SCOPE_AS:
- vty_out(vty, AS_LSDB_TITLE_FORMAT);
- ospf6_lsdb_show(vty, level, &type, &id, &adv_router,
- ospf6->lsdb);
- break;
-
- default:
- assert(0);
- break;
- }
-
- vty_out(vty, "\n");
+ ospf6_lsdb_type_show_wrapper(vty, level, &type, &id, &adv_router, uj,
+ ospf6);
return CMD_SUCCESS;
}
DEFUN (show_ipv6_ospf6_database_self_originated,
show_ipv6_ospf6_database_self_originated_cmd,
- "show ipv6 ospf6 database self-originated [<detail|dump|internal>]",
+ "show ipv6 ospf6 database self-originated [<detail|dump|internal>] [json]",
SHOW_STR
IPV6_STR
OSPF6_STR
@@ -848,46 +831,28 @@ DEFUN (show_ipv6_ospf6_database_self_originated,
"Display Self-originated LSAs\n"
"Display details of LSAs\n"
"Dump LSAs\n"
- "Display LSA's internal information\n")
+ "Display LSA's internal information\n"
+ JSON_STR)
{
int idx_level = 5;
int level;
- struct listnode *i, *j;
- struct ospf6 *ospf6;
- struct ospf6_area *oa;
- struct ospf6_interface *oi;
uint32_t adv_router = 0;
+ bool uj = use_json(argc, argv);
+ struct ospf6 *ospf6;
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
OSPF6_CMD_CHECK_RUNNING(ospf6);
level = parse_show_level(idx_level, argc, argv);
adv_router = ospf6->router_id;
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name);
- ospf6_lsdb_show(vty, level, NULL, NULL, &adv_router, oa->lsdb);
- }
-
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) {
- vty_out(vty, IF_LSDB_TITLE_FORMAT, oi->interface->name,
- oa->name);
- ospf6_lsdb_show(vty, level, NULL, NULL, &adv_router,
- oi->lsdb);
- }
- }
-
- vty_out(vty, AS_LSDB_TITLE_FORMAT);
- ospf6_lsdb_show(vty, level, NULL, NULL, &adv_router, ospf6->lsdb);
-
- vty_out(vty, "\n");
+ ospf6_lsdb_show_wrapper(vty, level, NULL, NULL, &adv_router, uj, ospf6);
return CMD_SUCCESS;
}
DEFUN (show_ipv6_ospf6_database_type_self_originated,
show_ipv6_ospf6_database_type_self_originated_cmd,
- "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> self-originated [<detail|dump|internal>]",
+ "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> self-originated [<detail|dump|internal>] [json]",
SHOW_STR
IPV6_STR
OSPF6_STR
@@ -904,17 +869,16 @@ DEFUN (show_ipv6_ospf6_database_type_self_originated,
"Display Self-originated LSAs\n"
"Display details of LSAs\n"
"Dump LSAs\n"
- "Display LSA's internal information\n")
+ "Display LSA's internal information\n"
+ JSON_STR)
{
int idx_lsa = 4;
int idx_level = 6;
int level;
- struct listnode *i, *j;
- struct ospf6 *ospf6;
- struct ospf6_area *oa;
- struct ospf6_interface *oi;
uint16_t type = 0;
uint32_t adv_router = 0;
+ bool uj = use_json(argc, argv);
+ struct ospf6 *ospf6;
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
OSPF6_CMD_CHECK_RUNNING(ospf6);
@@ -923,44 +887,14 @@ DEFUN (show_ipv6_ospf6_database_type_self_originated,
adv_router = ospf6->router_id;
- switch (OSPF6_LSA_SCOPE(type)) {
- case OSPF6_SCOPE_AREA:
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name);
- ospf6_lsdb_show(vty, level, &type, NULL, &adv_router,
- oa->lsdb);
- }
- break;
-
- case OSPF6_SCOPE_LINKLOCAL:
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) {
- vty_out(vty, IF_LSDB_TITLE_FORMAT,
- oi->interface->name, oa->name);
- ospf6_lsdb_show(vty, level, &type, NULL,
- &adv_router, oi->lsdb);
- }
- }
- break;
-
- case OSPF6_SCOPE_AS:
- vty_out(vty, AS_LSDB_TITLE_FORMAT);
- ospf6_lsdb_show(vty, level, &type, NULL, &adv_router,
- ospf6->lsdb);
- break;
-
- default:
- assert(0);
- break;
- }
-
- vty_out(vty, "\n");
+ ospf6_lsdb_type_show_wrapper(vty, level, &type, NULL, &adv_router, uj,
+ ospf6);
return CMD_SUCCESS;
}
DEFUN (show_ipv6_ospf6_database_type_self_originated_linkstate_id,
show_ipv6_ospf6_database_type_self_originated_linkstate_id_cmd,
- "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> self-originated linkstate-id A.B.C.D [<detail|dump|internal>]",
+ "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> self-originated linkstate-id A.B.C.D [<detail|dump|internal>] [json]",
SHOW_STR
IPV6_STR
OSPF6_STR
@@ -979,19 +913,18 @@ DEFUN (show_ipv6_ospf6_database_type_self_originated_linkstate_id,
"Specify Link state ID as IPv4 address notation\n"
"Display details of LSAs\n"
"Dump LSAs\n"
- "Display LSA's internal information\n")
+ "Display LSA's internal information\n"
+ JSON_STR)
{
int idx_lsa = 4;
int idx_ls_id = 7;
int idx_level = 8;
int level;
- struct listnode *i, *j;
- struct ospf6 *ospf6;
- struct ospf6_area *oa;
- struct ospf6_interface *oi;
uint16_t type = 0;
uint32_t adv_router = 0;
uint32_t id = 0;
+ bool uj = use_json(argc, argv);
+ struct ospf6 *ospf6;
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
OSPF6_CMD_CHECK_RUNNING(ospf6);
@@ -1000,44 +933,14 @@ DEFUN (show_ipv6_ospf6_database_type_self_originated_linkstate_id,
level = parse_show_level(idx_level, argc, argv);
adv_router = ospf6->router_id;
- switch (OSPF6_LSA_SCOPE(type)) {
- case OSPF6_SCOPE_AREA:
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name);
- ospf6_lsdb_show(vty, level, &type, &id, &adv_router,
- oa->lsdb);
- }
- break;
-
- case OSPF6_SCOPE_LINKLOCAL:
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) {
- vty_out(vty, IF_LSDB_TITLE_FORMAT,
- oi->interface->name, oa->name);
- ospf6_lsdb_show(vty, level, &type, &id,
- &adv_router, oi->lsdb);
- }
- }
- break;
-
- case OSPF6_SCOPE_AS:
- vty_out(vty, AS_LSDB_TITLE_FORMAT);
- ospf6_lsdb_show(vty, level, &type, &id, &adv_router,
- ospf6->lsdb);
- break;
-
- default:
- assert(0);
- break;
- }
-
- vty_out(vty, "\n");
+ ospf6_lsdb_type_show_wrapper(vty, level, &type, &id, &adv_router, uj,
+ ospf6);
return CMD_SUCCESS;
}
DEFUN (show_ipv6_ospf6_database_type_id_self_originated,
show_ipv6_ospf6_database_type_id_self_originated_cmd,
- "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> A.B.C.D self-originated [<detail|dump|internal>]",
+ "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> A.B.C.D self-originated [<detail|dump|internal>] [json]",
SHOW_STR
IPV6_STR
OSPF6_STR
@@ -1055,19 +958,18 @@ DEFUN (show_ipv6_ospf6_database_type_id_self_originated,
"Display Self-originated LSAs\n"
"Display details of LSAs\n"
"Dump LSAs\n"
- "Display LSA's internal information\n")
+ "Display LSA's internal information\n"
+ JSON_STR)
{
int idx_lsa = 4;
int idx_ls_id = 5;
int idx_level = 7;
int level;
- struct listnode *i, *j;
- struct ospf6 *ospf6;
- struct ospf6_area *oa;
- struct ospf6_interface *oi;
uint16_t type = 0;
uint32_t adv_router = 0;
uint32_t id = 0;
+ bool uj = use_json(argc, argv);
+ struct ospf6 *ospf6;
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
OSPF6_CMD_CHECK_RUNNING(ospf6);
@@ -1076,38 +978,8 @@ DEFUN (show_ipv6_ospf6_database_type_id_self_originated,
level = parse_show_level(idx_level, argc, argv);
adv_router = ospf6->router_id;
- switch (OSPF6_LSA_SCOPE(type)) {
- case OSPF6_SCOPE_AREA:
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name);
- ospf6_lsdb_show(vty, level, &type, &id, &adv_router,
- oa->lsdb);
- }
- break;
-
- case OSPF6_SCOPE_LINKLOCAL:
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) {
- for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) {
- vty_out(vty, IF_LSDB_TITLE_FORMAT,
- oi->interface->name, oa->name);
- ospf6_lsdb_show(vty, level, &type, &id,
- &adv_router, oi->lsdb);
- }
- }
- break;
-
- case OSPF6_SCOPE_AS:
- vty_out(vty, AS_LSDB_TITLE_FORMAT);
- ospf6_lsdb_show(vty, level, &type, &id, &adv_router,
- ospf6->lsdb);
- break;
-
- default:
- assert(0);
- break;
- }
-
- vty_out(vty, "\n");
+ ospf6_lsdb_type_show_wrapper(vty, level, &type, &id, &adv_router, uj,
+ ospf6);
return CMD_SUCCESS;
}
@@ -1134,7 +1006,7 @@ DEFUN (show_ipv6_ospf6_border_routers,
if (strmatch(argv[idx_ipv4]->text, "detail")) {
for (ro = ospf6_route_head(ospf6->brouter_table); ro;
ro = ospf6_route_next(ro))
- ospf6_route_show_detail(vty, ro);
+ ospf6_route_show_detail(vty, ro, NULL, false);
} else {
inet_pton(AF_INET, argv[idx_ipv4]->arg, &adv_router);
@@ -1147,7 +1019,7 @@ DEFUN (show_ipv6_ospf6_border_routers,
return CMD_SUCCESS;
}
- ospf6_route_show_detail(vty, ro);
+ ospf6_route_show_detail(vty, ro, NULL, false);
return CMD_SUCCESS;
}
} else {
diff --git a/ospfclient/ospf_apiclient.c b/ospfclient/ospf_apiclient.c
index d4f0dc953c..1b9b66d745 100644
--- a/ospfclient/ospf_apiclient.c
+++ b/ospfclient/ospf_apiclient.c
@@ -34,6 +34,7 @@
#include "stream.h"
#include "log.h"
#include "memory.h"
+#include "xref.h"
/* work around gcc bug 69981, disable MTYPEs in libospf */
#define _QUAGGA_OSPF_MEMORY_H
@@ -57,6 +58,8 @@
#include "ospfd/ospf_dump_api.c"
#include "ospfd/ospf_api.c"
+XREF_SETUP()
+
DEFINE_MGROUP(OSPFCLIENT, "libospfapiclient")
DEFINE_MTYPE_STATIC(OSPFCLIENT, OSPF_APICLIENT, "OSPF-API client")
diff --git a/ospfd/ospf_asbr.c b/ospfd/ospf_asbr.c
index 94fa1b5b44..0b4e5d7762 100644
--- a/ospfd/ospf_asbr.c
+++ b/ospfd/ospf_asbr.c
@@ -517,7 +517,7 @@ struct ospf_external_aggr_rt *ospf_external_aggr_match(struct ospf *ospf,
struct ospf_external_aggr_rt *ag = node->info;
zlog_debug(
- "%s: Matching aggregator found.prefix:%pI4/%d Aggregator %pI4/%d\n",
+ "%s: Matching aggregator found.prefix:%pI4/%d Aggregator %pI4/%d",
__func__, &p->prefix, p->prefixlen,
&ag->p.prefix, ag->p.prefixlen);
}
@@ -956,7 +956,7 @@ static void ospf_handle_external_aggr_update(struct ospf *ospf)
struct route_node *rn = NULL;
if (IS_DEBUG_OSPF(lsa, EXTNL_LSA_AGGR))
- zlog_debug("%s: Process modified aggregators.\n", __func__);
+ zlog_debug("%s: Process modified aggregators.", __func__);
for (rn = route_top(ospf->rt_aggr_tbl); rn; rn = route_next(rn)) {
struct ospf_external_aggr_rt *aggr;
@@ -1047,7 +1047,7 @@ static int ospf_asbr_external_aggr_process(struct thread *thread)
operation = ospf->aggr_action;
if (IS_DEBUG_OSPF(lsa, EXTNL_LSA_AGGR))
- zlog_debug("%s: operation:%d\n", __func__, operation);
+ zlog_debug("%s: operation:%d", __func__, operation);
switch (operation) {
case OSPF_ROUTE_AGGR_ADD:
diff --git a/ospfd/ospf_ase.c b/ospfd/ospf_ase.c
index e18d8ddb31..51da4a55a7 100644
--- a/ospfd/ospf_ase.c
+++ b/ospfd/ospf_ase.c
@@ -156,84 +156,6 @@ static int ospf_ase_forward_address_check(struct ospf *ospf,
return 1;
}
-#if 0
-/* Calculate ASBR route. */
-static struct ospf_route *
-ospf_ase_calculate_asbr_route (struct ospf *ospf,
- struct route_table *rt_network,
- struct route_table *rt_router,
- struct as_external_lsa *al)
-{
- struct prefix_ipv4 asbr;
- struct ospf_route *asbr_route;
- struct route_node *rn;
-
- /* Find ASBR route from Router routing table. */
- asbr.family = AF_INET;
- asbr.prefix = al->header.adv_router;
- asbr.prefixlen = IPV4_MAX_BITLEN;
- apply_mask_ipv4 (&asbr);
-
- asbr_route = ospf_find_asbr_route (ospf, rt_router, &asbr);
-
- if (asbr_route == NULL)
- {
- if (IS_DEBUG_OSPF (lsa, LSA))
- zlog_debug ("ospf_ase_calculate(): Route to ASBR %pI4 not found",
- &asbr.prefix);
- return NULL;
- }
-
- if (!(asbr_route->u.std.flags & ROUTER_LSA_EXTERNAL))
- {
- if (IS_DEBUG_OSPF (lsa, LSA))
- zlog_debug ("ospf_ase_calculate(): Originating router is not an ASBR");
- return NULL;
- }
-
- if (al->e[0].fwd_addr.s_addr != INADDR_ANY)
- {
- if (IS_DEBUG_OSPF (lsa, LSA))
- zlog_debug ("ospf_ase_calculate(): Forwarding address is not 0.0.0.0.");
-
- if (! ospf_ase_forward_address_check (ospf, al->e[0].fwd_addr))
- {
- if (IS_DEBUG_OSPF (lsa, LSA))
- zlog_debug ("ospf_ase_calculate(): Forwarding address is one of our addresses, Ignore.");
- return NULL;
- }
-
- if (IS_DEBUG_OSPF (lsa, LSA))
- zlog_debug ("ospf_ase_calculate(): Looking up in the Network Routing Table.");
-
- /* Looking up the path to the fwd_addr from Network route. */
- asbr.family = AF_INET;
- asbr.prefix = al->e[0].fwd_addr;
- asbr.prefixlen = IPV4_MAX_BITLEN;
-
- rn = route_node_match (rt_network, (struct prefix *) &asbr);
-
- if (rn == NULL)
- {
- if (IS_DEBUG_OSPF (lsa, LSA))
- zlog_debug ("ospf_ase_calculate(): Couldn't find a route to the forwarding address.");
- return NULL;
- }
-
- route_unlock_node (rn);
-
- if ((asbr_route = rn->info) == NULL)
- {
- if (IS_DEBUG_OSPF (lsa, LSA))
- zlog_debug ("ospf_ase_calculate(): Somehow OSPF route to ASBR is lost");
- return NULL;
- }
- }
-
- return asbr_route;
-}
-#endif
-
static struct ospf_route *
ospf_ase_calculate_new_route(struct ospf_lsa *lsa,
struct ospf_route *asbr_route, uint32_t metric)
diff --git a/ospfd/ospf_bfd.c b/ospfd/ospf_bfd.c
index 4640720952..a9bc9069d2 100644
--- a/ospfd/ospf_bfd.c
+++ b/ospfd/ospf_bfd.c
@@ -205,14 +205,14 @@ static int ospf_bfd_interface_dest_update(ZAPI_CALLBACK_ARGS)
struct ospf_neighbor *nbr = NULL;
struct route_node *node;
struct route_node *n_node;
- struct prefix p;
+ struct prefix p, src_p;
int status;
int old_status;
struct bfd_info *bfd_info;
struct timeval tv;
- ifp = bfd_get_peer_info(zclient->ibuf, &p, NULL, &status,
- NULL, vrf_id);
+ ifp = bfd_get_peer_info(zclient->ibuf, &p, &src_p, &status, NULL,
+ vrf_id);
if ((ifp == NULL) || (p.family != AF_INET))
return 0;
diff --git a/ospfd/ospf_dump.c b/ospfd/ospf_dump.c
index e15c9c42c7..b98852eeee 100644
--- a/ospfd/ospf_dump.c
+++ b/ospfd/ospf_dump.c
@@ -56,6 +56,7 @@ unsigned long conf_debug_ospf_nssa = 0;
unsigned long conf_debug_ospf_te = 0;
unsigned long conf_debug_ospf_ext = 0;
unsigned long conf_debug_ospf_sr = 0;
+unsigned long conf_debug_ospf_ti_lfa = 0;
unsigned long conf_debug_ospf_defaultinfo = 0;
unsigned long conf_debug_ospf_ldp_sync = 0;
unsigned long conf_debug_ospf_gr = 0;
@@ -71,6 +72,7 @@ unsigned long term_debug_ospf_nssa = 0;
unsigned long term_debug_ospf_te = 0;
unsigned long term_debug_ospf_ext = 0;
unsigned long term_debug_ospf_sr = 0;
+unsigned long term_debug_ospf_ti_lfa = 0;
unsigned long term_debug_ospf_defaultinfo;
unsigned long term_debug_ospf_ldp_sync;
unsigned long term_debug_ospf_gr = 0;
@@ -1470,6 +1472,24 @@ DEFUN (no_debug_ospf_sr,
return CMD_SUCCESS;
}
+DEFUN(debug_ospf_ti_lfa, debug_ospf_ti_lfa_cmd, "debug ospf ti-lfa",
+ DEBUG_STR OSPF_STR "OSPF-SR TI-LFA information\n")
+{
+ if (vty->node == CONFIG_NODE)
+ CONF_DEBUG_ON(ti_lfa, TI_LFA);
+ TERM_DEBUG_ON(ti_lfa, TI_LFA);
+ return CMD_SUCCESS;
+}
+
+DEFUN(no_debug_ospf_ti_lfa, no_debug_ospf_ti_lfa_cmd, "no debug ospf ti-lfa",
+ NO_STR DEBUG_STR OSPF_STR "OSPF-SR TI-LFA information\n")
+{
+ if (vty->node == CONFIG_NODE)
+ CONF_DEBUG_OFF(ti_lfa, TI_LFA);
+ TERM_DEBUG_OFF(ti_lfa, TI_LFA);
+ return CMD_SUCCESS;
+}
+
DEFUN (debug_ospf_default_info,
debug_ospf_default_info_cmd,
"debug ospf default-information",
@@ -1891,6 +1911,12 @@ static int config_write_debug(struct vty *vty)
write = 1;
}
+ /* debug ospf sr ti-lfa */
+	if (IS_CONF_DEBUG_OSPF(ti_lfa, TI_LFA) == OSPF_DEBUG_TI_LFA) {
+ vty_out(vty, "debug ospf%s ti-lfa\n", str);
+ write = 1;
+ }
+
/* debug ospf ldp-sync */
if (IS_CONF_DEBUG_OSPF(ldp_sync, LDP_SYNC) == OSPF_DEBUG_LDP_SYNC) {
vty_out(vty, "debug ospf%s ldp-sync\n", str);
@@ -1920,6 +1946,7 @@ void ospf_debug_init(void)
install_element(ENABLE_NODE, &debug_ospf_nssa_cmd);
install_element(ENABLE_NODE, &debug_ospf_te_cmd);
install_element(ENABLE_NODE, &debug_ospf_sr_cmd);
+ install_element(ENABLE_NODE, &debug_ospf_ti_lfa_cmd);
install_element(ENABLE_NODE, &debug_ospf_default_info_cmd);
install_element(ENABLE_NODE, &debug_ospf_ldp_sync_cmd);
install_element(ENABLE_NODE, &no_debug_ospf_ism_cmd);
@@ -1930,6 +1957,7 @@ void ospf_debug_init(void)
install_element(ENABLE_NODE, &no_debug_ospf_nssa_cmd);
install_element(ENABLE_NODE, &no_debug_ospf_te_cmd);
install_element(ENABLE_NODE, &no_debug_ospf_sr_cmd);
+ install_element(ENABLE_NODE, &no_debug_ospf_ti_lfa_cmd);
install_element(ENABLE_NODE, &no_debug_ospf_default_info_cmd);
install_element(ENABLE_NODE, &no_debug_ospf_ldp_sync_cmd);
install_element(ENABLE_NODE, &debug_ospf_gr_cmd);
@@ -1962,6 +1990,7 @@ void ospf_debug_init(void)
install_element(CONFIG_NODE, &debug_ospf_nssa_cmd);
install_element(CONFIG_NODE, &debug_ospf_te_cmd);
install_element(CONFIG_NODE, &debug_ospf_sr_cmd);
+ install_element(CONFIG_NODE, &debug_ospf_ti_lfa_cmd);
install_element(CONFIG_NODE, &debug_ospf_default_info_cmd);
install_element(CONFIG_NODE, &debug_ospf_ldp_sync_cmd);
install_element(CONFIG_NODE, &no_debug_ospf_nsm_cmd);
@@ -1971,6 +2000,7 @@ void ospf_debug_init(void)
install_element(CONFIG_NODE, &no_debug_ospf_nssa_cmd);
install_element(CONFIG_NODE, &no_debug_ospf_te_cmd);
install_element(CONFIG_NODE, &no_debug_ospf_sr_cmd);
+ install_element(CONFIG_NODE, &no_debug_ospf_ti_lfa_cmd);
install_element(CONFIG_NODE, &no_debug_ospf_default_info_cmd);
install_element(CONFIG_NODE, &no_debug_ospf_ldp_sync_cmd);
install_element(CONFIG_NODE, &debug_ospf_gr_cmd);
diff --git a/ospfd/ospf_dump.h b/ospfd/ospf_dump.h
index ea607fef7c..c4c5606663 100644
--- a/ospfd/ospf_dump.h
+++ b/ospfd/ospf_dump.h
@@ -60,6 +60,7 @@
#define OSPF_DEBUG_TE 0x04
#define OSPF_DEBUG_EXT 0x08
#define OSPF_DEBUG_SR 0x10
+#define OSPF_DEBUG_TI_LFA 0x11
#define OSPF_DEBUG_DEFAULTINFO 0x20
#define OSPF_DEBUG_LDP_SYNC 0x40
@@ -110,6 +111,8 @@
#define IS_DEBUG_OSPF_SR IS_DEBUG_OSPF(sr, SR)
+#define IS_DEBUG_OSPF_TI_LFA IS_DEBUG_OSPF(ti_lfa, TI_LFA)
+
#define IS_DEBUG_OSPF_DEFAULT_INFO IS_DEBUG_OSPF(defaultinfo, DEFAULTINFO)
#define IS_DEBUG_OSPF_LDP_SYNC IS_DEBUG_OSPF(ldp_sync, LDP_SYNC)
@@ -133,6 +136,7 @@ extern unsigned long term_debug_ospf_nssa;
extern unsigned long term_debug_ospf_te;
extern unsigned long term_debug_ospf_ext;
extern unsigned long term_debug_ospf_sr;
+extern unsigned long term_debug_ospf_ti_lfa;
extern unsigned long term_debug_ospf_defaultinfo;
extern unsigned long term_debug_ospf_ldp_sync;
extern unsigned long term_debug_ospf_gr;
diff --git a/ospfd/ospf_flood.c b/ospfd/ospf_flood.c
index cb2b7c2365..5f74984c66 100644
--- a/ospfd/ospf_flood.c
+++ b/ospfd/ospf_flood.c
@@ -458,11 +458,11 @@ static int ospf_flood_through_interface(struct ospf_interface *oi,
if (IS_DEBUG_OSPF_EVENT)
zlog_debug(
- "%s:ospf_flood_through_interface(): considering int %s, INBR(%s), LSA[%s] AGE %u",
- ospf_get_name(oi->ospf), IF_NAME(oi),
- inbr ?
- inet_ntop(AF_INET, &inbr->router_id, buf, sizeof(buf)) :
- "NULL",
+ "%s: considering int %s (%s), INBR(%s), LSA[%s] AGE %u",
+ __func__, IF_NAME(oi), ospf_get_name(oi->ospf),
+ inbr ? inet_ntop(AF_INET, &inbr->router_id, buf,
+ sizeof(buf))
+ : "NULL",
dump_lsa_key(lsa), ntohs(lsa->data->ls_age));
if (!ospf_if_is_enable(oi))
@@ -483,8 +483,8 @@ static int ospf_flood_through_interface(struct ospf_interface *oi,
onbr = rn->info;
if (IS_DEBUG_OSPF_EVENT)
zlog_debug(
- "ospf_flood_through_interface(): considering nbr %pI4(%s) (%s)",
- &onbr->router_id,
+ "%s: considering nbr %pI4 via %s (%s), state: %s",
+ __func__, &onbr->router_id, IF_NAME(oi),
ospf_get_name(oi->ospf),
lookup_msg(ospf_nsm_state_msg, onbr->state,
NULL));
@@ -504,7 +504,10 @@ static int ospf_flood_through_interface(struct ospf_interface *oi,
if (onbr->state < NSM_Full) {
if (IS_DEBUG_OSPF_EVENT)
zlog_debug(
- "ospf_flood_through_interface(): nbr adj is not Full");
+ "%s: adj to onbr %pI4 is not Full (%s)",
+ __func__, &onbr->router_id,
+ lookup_msg(ospf_nsm_state_msg,
+ onbr->state, NULL));
ls_req = ospf_ls_request_lookup(onbr, lsa);
if (ls_req != NULL) {
int ret;
@@ -534,7 +537,11 @@ static int ospf_flood_through_interface(struct ospf_interface *oi,
if (!CHECK_FLAG(onbr->options, OSPF_OPTION_O)) {
if (IS_DEBUG_OSPF(lsa, LSA_FLOODING))
zlog_debug(
- "Skip this neighbor: Not Opaque-capable.");
+ "%s: Skipping neighbor %s via %s -- Not Opaque-capable.",
+ __func__, IF_NAME(oi),
+ inet_ntop(AF_INET,
+ &onbr->router_id, buf,
+ sizeof(buf)));
continue;
}
}
@@ -550,7 +557,11 @@ static int ospf_flood_through_interface(struct ospf_interface *oi,
&onbr->router_id)) {
if (IS_DEBUG_OSPF(lsa, LSA_FLOODING))
zlog_debug(
- "Skip this neighbor: inbr == onbr");
+ "%s: Skipping neighbor %s via %s -- inbr == onbr.",
+ __func__, IF_NAME(oi),
+ inet_ntop(AF_INET,
+ &inbr->router_id, buf,
+ sizeof(buf)));
continue;
}
} else {
@@ -562,7 +573,11 @@ static int ospf_flood_through_interface(struct ospf_interface *oi,
&onbr->router_id)) {
if (IS_DEBUG_OSPF(lsa, LSA_FLOODING))
zlog_debug(
- "Skip this neighbor: lsah->adv_router == onbr");
+ "%s: Skipping neighbor %s via %s -- lsah->adv_router == onbr.",
+ __func__, IF_NAME(oi),
+ inet_ntop(AF_INET,
+ &onbr->router_id, buf,
+ sizeof(buf)));
continue;
}
}
@@ -591,9 +606,9 @@ static int ospf_flood_through_interface(struct ospf_interface *oi,
received the LSA already. */
if (NBR_IS_DR(inbr) || NBR_IS_BDR(inbr)) {
if (IS_DEBUG_OSPF_NSSA)
- zlog_debug(
- "ospf_flood_through_interface(): DR/BDR NOT SEND to int %s",
- IF_NAME(oi));
+ zlog_debug("%s: DR/BDR NOT SEND to int %s (%s)",
+ __func__, IF_NAME(oi),
+ ospf_get_name(oi->ospf));
return 1;
}
@@ -606,8 +621,9 @@ static int ospf_flood_through_interface(struct ospf_interface *oi,
if (oi->state == ISM_Backup) {
if (IS_DEBUG_OSPF_NSSA)
zlog_debug(
- "ospf_flood_through_interface(): ISM_Backup NOT SEND to int %s",
- IF_NAME(oi));
+ "%s: ISM_Backup NOT SEND to int %s (%s)",
+ __func__, IF_NAME(oi),
+ ospf_get_name(oi->ospf));
return 1;
}
}
@@ -620,9 +636,8 @@ static int ospf_flood_through_interface(struct ospf_interface *oi,
value of MaxAge). */
/* XXX HASSO: Is this IS_DEBUG_OSPF_NSSA really correct? */
if (IS_DEBUG_OSPF_NSSA)
- zlog_debug(
- "ospf_flood_through_interface(): DR/BDR sending upd to int %s",
- IF_NAME(oi));
+ zlog_debug("%s: DR/BDR sending upd to int %s (%s)", __func__,
+ IF_NAME(oi), ospf_get_name(oi->ospf));
/* RFC2328 Section 13.3
On non-broadcast networks, separate Link State Update
diff --git a/ospfd/ospf_ldp_sync.c b/ospfd/ospf_ldp_sync.c
index b574e2cac8..dbd45635b2 100644
--- a/ospfd/ospf_ldp_sync.c
+++ b/ospfd/ospf_ldp_sync.c
@@ -107,6 +107,7 @@ void ospf_ldp_sync_state_req_msg(struct interface *ifp)
ols_debug("ldp_sync: send state request to LDP for %s", ifp->name);
+ memset(&request, 0, sizeof(request));
 	strlcpy(request.name, ifp->name, sizeof(request.name));
request.proto = LDP_IGP_SYNC_IF_STATE_REQUEST;
request.ifindex = ifp->ifindex;
diff --git a/ospfd/ospf_lsa.c b/ospfd/ospf_lsa.c
index 4c9db16c6b..6bde5467b2 100644
--- a/ospfd/ospf_lsa.c
+++ b/ospfd/ospf_lsa.c
@@ -337,7 +337,7 @@ void lsa_header_set(struct stream *s, uint8_t options, uint8_t type,
/* router-LSA related functions. */
/* Get router-LSA flags. */
-static uint8_t router_lsa_flags(struct ospf_area *area)
+uint8_t router_lsa_flags(struct ospf_area *area)
{
uint8_t flags;
@@ -420,9 +420,8 @@ static uint16_t ospf_link_cost(struct ospf_interface *oi)
}
/* Set a link information. */
-static char link_info_set(struct stream **s, struct in_addr id,
- struct in_addr data, uint8_t type, uint8_t tos,
- uint16_t cost)
+char link_info_set(struct stream **s, struct in_addr id, struct in_addr data,
+ uint8_t type, uint8_t tos, uint16_t cost)
{
/* LSA stream is initially allocated to OSPF_MAX_LSA_SIZE, suits
* vast majority of cases. Some rare routers with lots of links need
@@ -679,7 +678,7 @@ static int router_lsa_link_set(struct stream **s, struct ospf_area *area)
}
/* Set router-LSA body. */
-static void ospf_router_lsa_body_set(struct stream **s, struct ospf_area *area)
+void ospf_router_lsa_body_set(struct stream **s, struct ospf_area *area)
{
unsigned long putp;
uint16_t cnt;
@@ -1756,8 +1755,8 @@ static struct ospf_lsa *ospf_lsa_translated_nssa_new(struct ospf *ospf,
== NULL) {
if (IS_DEBUG_OSPF_NSSA)
zlog_debug(
- "ospf_nssa_translate_originate(): Could not originate Translated Type-5 for %pI4",
- &ei.p.prefix);
+ "%s: Could not originate Translated Type-5 for %pI4",
+ __func__, &ei.p.prefix);
return NULL;
}
@@ -1790,24 +1789,22 @@ struct ospf_lsa *ospf_translated_nssa_originate(struct ospf *ospf,
if ((new = ospf_lsa_translated_nssa_new(ospf, type7)) == NULL) {
if (IS_DEBUG_OSPF_NSSA)
zlog_debug(
- "ospf_translated_nssa_originate(): Could not translate Type-7, Id %pI4, to Type-5",
- &type7->data->id);
+ "%s: Could not translate Type-7, Id %pI4, to Type-5",
+ __func__, &type7->data->id);
return NULL;
}
extnew = (struct as_external_lsa *)new->data;
if ((new = ospf_lsa_install(ospf, NULL, new)) == NULL) {
- flog_warn(
- EC_OSPF_LSA_INSTALL_FAILURE,
- "ospf_lsa_translated_nssa_originate(): Could not install LSA id %pI4",
- &type7->data->id);
+ flog_warn(EC_OSPF_LSA_INSTALL_FAILURE,
+ "%s: Could not install LSA id %pI4", __func__,
+ &type7->data->id);
return NULL;
}
if (IS_DEBUG_OSPF_NSSA) {
- zlog_debug(
- "ospf_translated_nssa_originate(): translated Type 7, installed:");
+ zlog_debug("%s: translated Type 7, installed", __func__);
ospf_lsa_header_dump(new->data);
zlog_debug(" Network mask: %d", ip_masklen(extnew->mask));
zlog_debug(" Forward addr: %pI4",
@@ -2166,12 +2163,6 @@ void ospf_external_lsa_flush(struct ospf *ospf, uint8_t type,
/* Sweep LSA from Link State Retransmit List. */
ospf_ls_retransmit_delete_nbr_as(ospf, lsa);
-/* There must be no self-originated LSA in rtrs_external. */
-#if 0
- /* Remove External route from Zebra. */
- ospf_zebra_delete ((struct prefix_ipv4 *) p, &nexthop);
-#endif
-
if (!IS_LSA_MAXAGE(lsa)) {
/* Unregister LSA from Refresh queue. */
ospf_refresher_unregister_lsa(ospf, lsa);
@@ -2257,10 +2248,9 @@ void ospf_external_lsa_refresh_type(struct ospf *ospf, uint8_t type,
lsa,
EXTNL_LSA_AGGR))
zlog_debug(
- "%s: Send Aggreate LSA (%pFX/%d)",
+ "%s: Send Aggreate LSA (%pFX)",
__func__,
- &aggr->p.prefix,
- aggr->p.prefixlen);
+ &aggr->p);
ospf_originate_summary_lsa(
ospf, aggr, ei);
@@ -2443,12 +2433,7 @@ ospf_summary_lsa_install(struct ospf *ospf, struct ospf_lsa *new, int rt_recalc)
necessary to re-examine all the AS-external-LSAs.
*/
-#if 0
- /* This doesn't exist yet... */
- ospf_summary_incremental_update(new); */
-#else /* #if 0 */
ospf_spf_calculate_schedule(ospf, SPF_FLAG_SUMMARY_LSA_INSTALL);
-#endif /* #if 0 */
}
if (IS_LSA_SELF(new))
@@ -2469,16 +2454,8 @@ static struct ospf_lsa *ospf_summary_asbr_lsa_install(struct ospf *ospf,
destination is an AS boundary router, it may also be
necessary to re-examine all the AS-external-LSAs.
*/
-#if 0
- /* These don't exist yet... */
- ospf_summary_incremental_update(new);
- /* Isn't this done by the above call?
- - RFC 2328 Section 16.5 implies it should be */
- /* ospf_ase_calculate_schedule(); */
-#else /* #if 0 */
ospf_spf_calculate_schedule(ospf,
SPF_FLAG_ASBR_SUMMARY_LSA_INSTALL);
-#endif /* #if 0 */
}
/* register LSA to refresh-list. */
@@ -2667,7 +2644,7 @@ struct ospf_lsa *ospf_lsa_install(struct ospf *ospf, struct ospf_interface *oi,
} else {
if (IS_DEBUG_OSPF(lsa, LSA_GENERATE)) {
zlog_debug(
- "ospf_lsa_install() got an lsa with seq 0x80000000 that was not self originated. Ignoring\n");
+ "ospf_lsa_install() got an lsa with seq 0x80000000 that was not self originated. Ignoring");
ospf_lsa_header_dump(lsa->data);
}
return old;
@@ -2857,9 +2834,10 @@ static int ospf_maxage_lsa_remover(struct thread *thread)
*/
if (old != lsa) {
flog_err(EC_OSPF_LSA_MISSING,
- "%s: LSA[Type%d:%s]: LSA not in LSDB",
- __func__, lsa->data->type,
- inet_ntoa(lsa->data->id));
+ "%s: LSA[Type%d:%pI4]: LSA not in LSDB",
+ __func__, lsa->data->type,
+ &lsa->data->id);
+
continue;
}
ospf_discard_from_db(ospf, lsa->lsdb, lsa);
diff --git a/ospfd/ospf_lsa.h b/ospfd/ospf_lsa.h
index c5de287948..f2a0d36e7e 100644
--- a/ospfd/ospf_lsa.h
+++ b/ospfd/ospf_lsa.h
@@ -260,6 +260,8 @@ extern struct lsa_header *ospf_lsa_data_dup(struct lsa_header *);
extern void ospf_lsa_data_free(struct lsa_header *);
/* Prototype for various LSAs */
+extern void ospf_router_lsa_body_set(struct stream **s, struct ospf_area *area);
+extern uint8_t router_lsa_flags(struct ospf_area *area);
extern int ospf_router_lsa_update(struct ospf *);
extern int ospf_router_lsa_update_area(struct ospf_area *);
@@ -333,6 +335,10 @@ extern int is_prefix_default(struct prefix_ipv4 *);
extern int metric_type(struct ospf *, uint8_t, unsigned short);
extern int metric_value(struct ospf *, uint8_t, unsigned short);
+extern char link_info_set(struct stream **s, struct in_addr id,
+ struct in_addr data, uint8_t type, uint8_t tos,
+ uint16_t cost);
+
extern struct in_addr ospf_get_nssa_ip(struct ospf_area *);
extern int ospf_translated_nssa_compare(struct ospf_lsa *, struct ospf_lsa *);
extern struct ospf_lsa *ospf_translated_nssa_refresh(struct ospf *,
diff --git a/ospfd/ospf_memory.c b/ospfd/ospf_memory.c
index ae22cec414..f4fb68cbdf 100644
--- a/ospfd/ospf_memory.c
+++ b/ospfd/ospf_memory.c
@@ -58,3 +58,5 @@ DEFINE_MTYPE(OSPFD, OSPF_EXT_PARAMS, "OSPF Extended parameters")
DEFINE_MTYPE(OSPFD, OSPF_SR_PARAMS, "OSPF Segment Routing parameters")
DEFINE_MTYPE(OSPFD, OSPF_GR_HELPER, "OSPF Graceful Restart Helper")
DEFINE_MTYPE(OSPFD, OSPF_EXTERNAL_RT_AGGR, "OSPF External Route Summarisation")
+DEFINE_MTYPE(OSPFD, OSPF_P_SPACE, "OSPF TI-LFA P-Space")
+DEFINE_MTYPE(OSPFD, OSPF_Q_SPACE, "OSPF TI-LFA Q-Space")
diff --git a/ospfd/ospf_memory.h b/ospfd/ospf_memory.h
index 624b1d3306..42bc8d7b77 100644
--- a/ospfd/ospf_memory.h
+++ b/ospfd/ospf_memory.h
@@ -57,5 +57,7 @@ DECLARE_MTYPE(OSPF_SR_PARAMS)
DECLARE_MTYPE(OSPF_EXT_PARAMS)
DECLARE_MTYPE(OSPF_GR_HELPER)
DECLARE_MTYPE(OSPF_EXTERNAL_RT_AGGR)
+DECLARE_MTYPE(OSPF_P_SPACE)
+DECLARE_MTYPE(OSPF_Q_SPACE)
#endif /* _QUAGGA_OSPF_MEMORY_H */
diff --git a/ospfd/ospf_packet.c b/ospfd/ospf_packet.c
index ef39b6c2f6..343e406f28 100644
--- a/ospfd/ospf_packet.c
+++ b/ospfd/ospf_packet.c
@@ -2987,6 +2987,16 @@ static enum ospf_read_return_enum ospf_read_helper(struct ospf *ospf)
}
}
+ if (ospf->vrf_id == VRF_DEFAULT && ospf->vrf_id != ifp->vrf_id) {
+ /*
+ * We may have a situation where l3mdev_accept == 1
+ * let's just kindly drop the packet and move on.
+ * ospf really really really does not like when
+ * we receive the same packet multiple times.
+ */
+ return OSPF_READ_CONTINUE;
+ }
+
/* Self-originated packet should be discarded silently. */
if (ospf_if_lookup_by_local_addr(ospf, NULL, iph->ip_src)) {
if (IS_DEBUG_OSPF_PACKET(0, RECV)) {
diff --git a/ospfd/ospf_ri.c b/ospfd/ospf_ri.c
index 3145d16161..4083ea9332 100644
--- a/ospfd/ospf_ri.c
+++ b/ospfd/ospf_ri.c
@@ -1451,9 +1451,7 @@ static uint16_t show_vty_sr_range(struct vty *vty, struct tlv_header *tlvh)
GET_LABEL(ntohl(range->lower.value)));
} else {
zlog_debug(
- " Segment Routing %s Range TLV:\n"
- " Range Size = %d\n"
- " SID Label = %d\n\n",
+ " Segment Routing %s Range TLV: Range Size = %d SID Label = %d",
ntohs(range->header.type) == RI_SR_TLV_SRGB_LABEL_RANGE
? "Global"
: "Local",
@@ -1476,8 +1474,7 @@ static uint16_t show_vty_sr_msd(struct vty *vty, struct tlv_header *tlvh)
msd->value);
} else {
zlog_debug(
- " Segment Routing MSD TLV:\n"
- " Node Maximum Stack Depth = %d\n",
+ " Segment Routing MSD TLV: Node Maximum Stack Depth = %d",
msd->value);
}
diff --git a/ospfd/ospf_route.c b/ospfd/ospf_route.c
index 590122e223..7cfcaf14be 100644
--- a/ospfd/ospf_route.c
+++ b/ospfd/ospf_route.c
@@ -71,15 +71,31 @@ struct ospf_path *ospf_path_new(void)
static struct ospf_path *ospf_path_dup(struct ospf_path *path)
{
struct ospf_path *new;
+ int memsize;
new = ospf_path_new();
memcpy(new, path, sizeof(struct ospf_path));
+ /* optional TI-LFA backup paths */
+ if (path->srni.backup_label_stack) {
+ memsize = sizeof(struct mpls_label_stack)
+ + (sizeof(mpls_label_t)
+ * path->srni.backup_label_stack->num_labels);
+ new->srni.backup_label_stack =
+ XCALLOC(MTYPE_OSPF_PATH, memsize);
+ memcpy(new->srni.backup_label_stack,
+ path->srni.backup_label_stack, memsize);
+ }
+
return new;
}
void ospf_path_free(struct ospf_path *op)
{
+ /* optional TI-LFA backup paths */
+ if (op->srni.backup_label_stack)
+ XFREE(MTYPE_OSPF_PATH, op->srni.backup_label_stack);
+
XFREE(MTYPE_OSPF_PATH, op);
}
@@ -140,6 +156,35 @@ static int ospf_route_exist_new_table(struct route_table *rt,
return 1;
}
+static int ospf_route_backup_path_same(struct sr_nexthop_info *srni1,
+ struct sr_nexthop_info *srni2)
+{
+ struct mpls_label_stack *ls1, *ls2;
+ uint8_t label_count;
+
+ ls1 = srni1->backup_label_stack;
+ ls2 = srni2->backup_label_stack;
+
+ if (!ls1 && !ls2)
+ return 1;
+
+ if ((ls1 && !ls2) || (!ls1 && ls2))
+ return 0;
+
+ if (ls1->num_labels != ls2->num_labels)
+ return 0;
+
+ for (label_count = 0; label_count < ls1->num_labels; label_count++) {
+ if (ls1->label[label_count] != ls2->label[label_count])
+ return 0;
+ }
+
+ if (!IPV4_ADDR_SAME(&srni1->backup_nexthop, &srni2->backup_nexthop))
+ return 0;
+
+ return 1;
+}
+
/* If a prefix and a nexthop match any route in the routing table,
then return 1, otherwise return 0. */
int ospf_route_match_same(struct route_table *rt, struct prefix_ipv4 *prefix,
@@ -180,6 +225,11 @@ int ospf_route_match_same(struct route_table *rt, struct prefix_ipv4 *prefix,
return 0;
if (op->ifindex != newop->ifindex)
return 0;
+
+ /* check TI-LFA backup paths */
+ if (!ospf_route_backup_path_same(&op->srni,
+ &newop->srni))
+ return 0;
}
return 1;
} else if (prefix_same(&rn->p, (struct prefix *)prefix))
@@ -664,38 +714,6 @@ void ospf_route_table_dump(struct route_table *rt)
zlog_debug("========================================");
}
-void ospf_route_table_print(struct vty *vty, struct route_table *rt)
-{
- struct route_node *rn;
- struct ospf_route * or ;
- struct listnode *pnode;
- struct ospf_path *path;
-
- vty_out(vty, "========== OSPF routing table ==========\n");
- for (rn = route_top(rt); rn; rn = route_next(rn))
- if ((or = rn->info) != NULL) {
- if (or->type == OSPF_DESTINATION_NETWORK) {
- vty_out(vty, "N %-18pFX %-15pI4 %s %d\n",
- &rn->p, & or->u.std.area_id,
- ospf_path_type_str[or->path_type],
- or->cost);
- for (ALL_LIST_ELEMENTS_RO(or->paths, pnode,
- path))
- if (path->nexthop.s_addr != INADDR_ANY)
- vty_out(vty, " -> %pI4\n",
- &path->nexthop);
- else
- vty_out(vty, " -> %s\n",
- "directly connected");
- } else
- vty_out(vty, "R %-18pI4 %-15pI4 %s %d\n",
- &rn->p.u.prefix4, & or->u.std.area_id,
- ospf_path_type_str[or->path_type],
- or->cost);
- }
- vty_out(vty, "========================================\n");
-}
-
/* This is 16.4.1 implementation.
o Intra-area paths using non-backbone areas are always the most preferred.
o The other paths, intra-area backbone paths and inter-area paths,
@@ -802,6 +820,7 @@ void ospf_route_copy_nexthops_from_vertex(struct ospf_area *area,
|| area->spf_dry_run) {
path = ospf_path_new();
path->nexthop = nexthop->router;
+ path->adv_router = v->id;
if (oi) {
path->ifindex = oi->ifp->ifindex;
diff --git a/ospfd/ospf_route.h b/ospfd/ospf_route.h
index c3fa5954d5..811581c0d3 100644
--- a/ospfd/ospf_route.h
+++ b/ospfd/ospf_route.h
@@ -42,6 +42,10 @@ struct sr_nexthop_info {
* or NULL if next hop is the destination of the prefix
*/
struct sr_node *nexthop;
+
+ /* TI-LFA */
+ struct mpls_label_stack *backup_label_stack;
+ struct in_addr backup_nexthop;
};
/* OSPF Path. */
@@ -132,7 +136,6 @@ extern void ospf_route_table_free(struct route_table *);
extern void ospf_route_install(struct ospf *, struct route_table *);
extern void ospf_route_table_dump(struct route_table *);
-extern void ospf_route_table_print(struct vty *vty, struct route_table *rt);
extern void ospf_intra_add_router(struct route_table *, struct vertex *,
struct ospf_area *);
diff --git a/ospfd/ospf_snmp.c b/ospfd/ospf_snmp.c
index 66dd9c7ca4..3f4ca44b05 100644
--- a/ospfd/ospf_snmp.c
+++ b/ospfd/ospf_snmp.c
@@ -1236,7 +1236,6 @@ static struct ospf_nbr_nbma *ospfHostLookup(struct variable *v, oid *name,
size_t *length,
struct in_addr *addr, int exact)
{
- int len;
struct ospf_nbr_nbma *nbr_nbma;
struct ospf *ospf;
@@ -1258,28 +1257,8 @@ static struct ospf_nbr_nbma *ospfHostLookup(struct variable *v, oid *name,
nbr_nbma = ospf_nbr_nbma_lookup(ospf, *addr);
return nbr_nbma;
- } else {
- len = *length - v->namelen;
- if (len > 4)
- len = 4;
-
- oid2in_addr(name + v->namelen, len, addr);
-
- nbr_nbma =
- ospf_nbr_nbma_lookup_next(ospf, addr, len == 0 ? 1 : 0);
-
- if (nbr_nbma == NULL)
- return NULL;
-
- oid_copy_addr(name + v->namelen, addr, IN_ADDR_SIZE);
-
- /* Set TOS 0. */
- name[v->namelen + IN_ADDR_SIZE] = 0;
-
- *length = v->namelen + IN_ADDR_SIZE + 1;
-
- return nbr_nbma;
}
+
return NULL;
}
diff --git a/ospfd/ospf_spf.c b/ospfd/ospf_spf.c
index 4665f53edb..82acd0829c 100644
--- a/ospfd/ospf_spf.c
+++ b/ospfd/ospf_spf.c
@@ -46,6 +46,7 @@
#include "ospfd/ospf_abr.h"
#include "ospfd/ospf_dump.h"
#include "ospfd/ospf_sr.h"
+#include "ospfd/ospf_ti_lfa.h"
#include "ospfd/ospf_errors.h"
/* Variables to ensure a SPF scheduled log message is printed only once */
@@ -141,11 +142,16 @@ static void ospf_canonical_nexthops_free(struct vertex *root)
ospf_canonical_nexthops_free(child);
/* Free child nexthops pointing back to this root vertex */
- for (ALL_LIST_ELEMENTS(child->parents, n2, nn2, vp))
+ for (ALL_LIST_ELEMENTS(child->parents, n2, nn2, vp)) {
if (vp->parent == root && vp->nexthop) {
vertex_nexthop_free(vp->nexthop);
vp->nexthop = NULL;
+ if (vp->local_nexthop) {
+ vertex_nexthop_free(vp->local_nexthop);
+ vp->local_nexthop = NULL;
+ }
}
+ }
}
}
@@ -154,7 +160,8 @@ static void ospf_canonical_nexthops_free(struct vertex *root)
* vertex_nexthop, with refcounts.
*/
static struct vertex_parent *vertex_parent_new(struct vertex *v, int backlink,
- struct vertex_nexthop *hop)
+ struct vertex_nexthop *hop,
+ struct vertex_nexthop *lhop)
{
struct vertex_parent *new;
@@ -163,6 +170,7 @@ static struct vertex_parent *vertex_parent_new(struct vertex *v, int backlink,
new->parent = v;
new->backlink = backlink;
new->nexthop = hop;
+ new->local_nexthop = lhop;
return new;
}
@@ -172,7 +180,7 @@ static void vertex_parent_free(void *p)
XFREE(MTYPE_OSPF_VERTEX_PARENT, p);
}
-static int vertex_parent_cmp(void *aa, void *bb)
+int vertex_parent_cmp(void *aa, void *bb)
{
struct vertex_parent *a = aa, *b = bb;
return IPV4_ADDR_CMP(&a->nexthop->router, &b->nexthop->router);
@@ -284,6 +292,257 @@ static void ospf_vertex_add_parent(struct vertex *v)
}
}
+/* Find a vertex according to its router id */
+struct vertex *ospf_spf_vertex_find(struct in_addr id, struct list *vertex_list)
+{
+ struct listnode *node;
+ struct vertex *found;
+
+ for (ALL_LIST_ELEMENTS_RO(vertex_list, node, found)) {
+ if (found->id.s_addr == id.s_addr)
+ return found;
+ }
+
+ return NULL;
+}
+
+/* Find a vertex parent according to its router id */
+struct vertex_parent *ospf_spf_vertex_parent_find(struct in_addr id,
+ struct vertex *vertex)
+{
+ struct listnode *node;
+ struct vertex_parent *found;
+
+ for (ALL_LIST_ELEMENTS_RO(vertex->parents, node, found)) {
+ if (found->parent->id.s_addr == id.s_addr)
+ return found;
+ }
+
+ return NULL;
+}
+
+struct vertex *ospf_spf_vertex_by_nexthop(struct vertex *root,
+ struct in_addr *nexthop)
+{
+ struct listnode *node;
+ struct vertex *child;
+ struct vertex_parent *vertex_parent;
+
+ for (ALL_LIST_ELEMENTS_RO(root->children, node, child)) {
+ vertex_parent = ospf_spf_vertex_parent_find(root->id, child);
+ if (vertex_parent->nexthop->router.s_addr == nexthop->s_addr)
+ return child;
+ }
+
+ return NULL;
+}
+
+/* Create a deep copy of a SPF vertex without children and parents */
+static struct vertex *ospf_spf_vertex_copy(struct vertex *vertex)
+{
+ struct vertex *copy;
+
+ copy = XCALLOC(MTYPE_OSPF_VERTEX, sizeof(struct vertex));
+
+ memcpy(copy, vertex, sizeof(struct vertex));
+ copy->parents = list_new();
+ copy->parents->del = vertex_parent_free;
+ copy->parents->cmp = vertex_parent_cmp;
+ copy->children = list_new();
+
+ return copy;
+}
+
+/* Create a deep copy of a SPF vertex_parent */
+static struct vertex_parent *
+ospf_spf_vertex_parent_copy(struct vertex_parent *vertex_parent)
+{
+ struct vertex_parent *vertex_parent_copy;
+ struct vertex_nexthop *nexthop_copy, *local_nexthop_copy;
+
+ vertex_parent_copy =
+ XCALLOC(MTYPE_OSPF_VERTEX, sizeof(struct vertex_parent));
+
+ nexthop_copy = vertex_nexthop_new();
+ local_nexthop_copy = vertex_nexthop_new();
+
+ memcpy(vertex_parent_copy, vertex_parent, sizeof(struct vertex_parent));
+ memcpy(nexthop_copy, vertex_parent->nexthop,
+ sizeof(struct vertex_nexthop));
+ memcpy(local_nexthop_copy, vertex_parent->local_nexthop,
+ sizeof(struct vertex_nexthop));
+
+ vertex_parent_copy->nexthop = nexthop_copy;
+ vertex_parent_copy->local_nexthop = local_nexthop_copy;
+
+ return vertex_parent_copy;
+}
+
+/* Create a deep copy of a SPF tree */
+void ospf_spf_copy(struct vertex *vertex, struct list *vertex_list)
+{
+ struct listnode *node;
+ struct vertex *vertex_copy, *child, *child_copy, *parent_copy;
+ struct vertex_parent *vertex_parent, *vertex_parent_copy;
+
+ /* First check if the node is already in the vertex list */
+ vertex_copy = ospf_spf_vertex_find(vertex->id, vertex_list);
+ if (!vertex_copy) {
+ vertex_copy = ospf_spf_vertex_copy(vertex);
+ listnode_add(vertex_list, vertex_copy);
+ }
+
+ /* Copy all parents, create parent nodes if necessary */
+ for (ALL_LIST_ELEMENTS_RO(vertex->parents, node, vertex_parent)) {
+ parent_copy = ospf_spf_vertex_find(vertex_parent->parent->id,
+ vertex_list);
+ if (!parent_copy) {
+ parent_copy =
+ ospf_spf_vertex_copy(vertex_parent->parent);
+ listnode_add(vertex_list, parent_copy);
+ }
+ vertex_parent_copy = ospf_spf_vertex_parent_copy(vertex_parent);
+ vertex_parent_copy->parent = parent_copy;
+ listnode_add(vertex_copy->parents, vertex_parent_copy);
+ }
+
+ /* Copy all children, create child nodes if necessary */
+ for (ALL_LIST_ELEMENTS_RO(vertex->children, node, child)) {
+ child_copy = ospf_spf_vertex_find(child->id, vertex_list);
+ if (!child_copy) {
+ child_copy = ospf_spf_vertex_copy(child);
+ listnode_add(vertex_list, child_copy);
+ }
+ listnode_add(vertex_copy->children, child_copy);
+ }
+
+ /* Finally continue copying with child nodes */
+ for (ALL_LIST_ELEMENTS_RO(vertex->children, node, child))
+ ospf_spf_copy(child, vertex_list);
+}
+
+static void ospf_spf_remove_branch(struct vertex_parent *vertex_parent,
+ struct vertex *child,
+ struct list *vertex_list)
+{
+ struct listnode *node, *nnode, *inner_node, *inner_nnode;
+ struct vertex *grandchild;
+ struct vertex_parent *vertex_parent_found;
+ bool has_more_links = false;
+
+ /*
+ * First check if there are more nexthops for that parent to that child
+ */
+ for (ALL_LIST_ELEMENTS_RO(child->parents, node, vertex_parent_found)) {
+ if (vertex_parent_found->parent->id.s_addr
+ == vertex_parent->parent->id.s_addr
+ && vertex_parent_found->nexthop->router.s_addr
+ != vertex_parent->nexthop->router.s_addr)
+ has_more_links = true;
+ }
+
+ /*
+ * No more links from that parent? Then delete the child from its
+ * children list.
+ */
+ if (!has_more_links)
+ listnode_delete(vertex_parent->parent->children, child);
+
+ /*
+ * Delete the vertex_parent from the child parents list, this needs to
+ * be done anyway.
+ */
+ listnode_delete(child->parents, vertex_parent);
+
+ /*
+ * Are there actually more parents left? If not, then delete the child!
+ * This is done by recursively removing the links to the grandchildren,
+ * such that finally the child can be removed without leaving unused
+ * partial branches.
+ */
+ if (child->parents->count == 0) {
+ for (ALL_LIST_ELEMENTS(child->children, node, nnode,
+ grandchild)) {
+ for (ALL_LIST_ELEMENTS(grandchild->parents, inner_node,
+ inner_nnode,
+ vertex_parent_found)) {
+ ospf_spf_remove_branch(vertex_parent_found,
+ grandchild, vertex_list);
+ }
+ }
+ listnode_delete(vertex_list, child);
+ ospf_vertex_free(child);
+ }
+}
+
+static int ospf_spf_remove_link(struct vertex *vertex, struct list *vertex_list,
+ struct router_lsa_link *link)
+{
+ struct listnode *node, *inner_node;
+ struct vertex *child;
+ struct vertex_parent *vertex_parent;
+
+ /*
+ * Identify the node who shares a subnet (given by the link) with a
+ * child and remove the branch of this particular child.
+ */
+ for (ALL_LIST_ELEMENTS_RO(vertex->children, node, child)) {
+ for (ALL_LIST_ELEMENTS_RO(child->parents, inner_node,
+ vertex_parent)) {
+ if ((vertex_parent->local_nexthop->router.s_addr
+ & link->link_data.s_addr)
+ == (link->link_id.s_addr
+ & link->link_data.s_addr)) {
+ ospf_spf_remove_branch(vertex_parent, child,
+ vertex_list);
+ return 0;
+ }
+ }
+ }
+
+ /* No link found yet, move on recursively */
+ for (ALL_LIST_ELEMENTS_RO(vertex->children, node, child)) {
+ if (ospf_spf_remove_link(child, vertex_list, link) == 0)
+ return 0;
+ }
+
+ /* link was not removed yet */
+ return 1;
+}
+
+void ospf_spf_remove_resource(struct vertex *vertex, struct list *vertex_list,
+ struct protected_resource *resource)
+{
+ struct listnode *node, *nnode;
+ struct vertex *found;
+ struct vertex_parent *vertex_parent;
+
+ switch (resource->type) {
+ case OSPF_TI_LFA_LINK_PROTECTION:
+ ospf_spf_remove_link(vertex, vertex_list, resource->link);
+ break;
+ case OSPF_TI_LFA_NODE_PROTECTION:
+ found = ospf_spf_vertex_find(resource->router_id, vertex_list);
+ if (!found)
+ break;
+
+ /*
+ * Remove the node by removing all links from its parents. Note
+ * that the child is automatically removed here with the last
+ * link from a parent, hence no explicit removal of the node.
+ */
+ for (ALL_LIST_ELEMENTS(found->parents, node, nnode,
+ vertex_parent))
+ ospf_spf_remove_branch(vertex_parent, found,
+ vertex_list);
+
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+}
+
static void ospf_spf_init(struct ospf_area *area, struct ospf_lsa *root_lsa,
bool is_dry_run, bool is_root_node)
{
@@ -427,6 +686,7 @@ static void ospf_spf_flush_parents(struct vertex *w)
*/
static void ospf_spf_add_parent(struct vertex *v, struct vertex *w,
struct vertex_nexthop *newhop,
+ struct vertex_nexthop *newlhop,
unsigned int distance)
{
struct vertex_parent *vp, *wp;
@@ -482,7 +742,8 @@ static void ospf_spf_add_parent(struct vertex *v, struct vertex *w,
}
}
- vp = vertex_parent_new(v, ospf_lsa_has_link(w->lsa, v->lsa), newhop);
+ vp = vertex_parent_new(v, ospf_lsa_has_link(w->lsa, v->lsa), newhop,
+ newlhop);
listnode_add_sort(w->parents, vp);
return;
@@ -541,7 +802,7 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area,
unsigned int distance, int lsa_pos)
{
struct listnode *node, *nnode;
- struct vertex_nexthop *nh;
+ struct vertex_nexthop *nh, *lnh;
struct vertex_parent *vp;
unsigned int added = 0;
char buf1[BUFSIZ];
@@ -586,17 +847,22 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area,
struct ospf_interface *oi = NULL;
struct in_addr nexthop = {.s_addr = 0};
- oi = ospf_if_lookup_by_lsa_pos(area, lsa_pos);
- if (!oi) {
- zlog_debug(
- "%s: OI not found in LSA: lsa_pos: %d link_id:%s link_data:%s",
- __func__, lsa_pos,
- inet_ntop(AF_INET, &l->link_id,
- buf1, BUFSIZ),
- inet_ntop(AF_INET,
- &l->link_data, buf2,
- BUFSIZ));
- return 0;
+ if (area->spf_root_node) {
+ oi = ospf_if_lookup_by_lsa_pos(area,
+ lsa_pos);
+ if (!oi) {
+ zlog_debug(
+ "%s: OI not found in LSA: lsa_pos: %d link_id:%s link_data:%s",
+ __func__, lsa_pos,
+ inet_ntop(AF_INET,
+ &l->link_id,
+ buf1, BUFSIZ),
+ inet_ntop(AF_INET,
+ &l->link_data,
+ buf2,
+ BUFSIZ));
+ return 0;
+ }
}
/*
@@ -644,7 +910,21 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area,
* as described above using a reverse lookup to
* figure out the nexthop.
*/
- if (oi->type == OSPF_IFTYPE_POINTOPOINT) {
+
+ /*
+ * HACK: we don't know (yet) how to distinguish
+ * between P2P and P2MP interfaces by just
+ * looking at LSAs, which is important for
+ * TI-LFA since you want to do SPF calculations
+ * from the perspective of other nodes. Since
+ * TI-LFA is currently not implemented for P2MP
+ * we just check here if it is enabled and then
+ * blindly assume that P2P is used. Ultimately
+ * the interface code needs to be removed
+ * somehow.
+ */
+ if (area->ospf->ti_lfa_enabled
+ || (oi && oi->type == OSPF_IFTYPE_POINTOPOINT)) {
struct ospf_neighbor *nbr_w = NULL;
/* Calculating node is root node, link
@@ -673,7 +953,7 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area,
}
}
}
- } else if (oi->type
+ } else if (oi && oi->type
== OSPF_IFTYPE_POINTOMULTIPOINT) {
struct prefix_ipv4 la;
@@ -703,12 +983,22 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area,
nh = vertex_nexthop_new();
nh->router = nexthop;
nh->lsa_pos = lsa_pos;
- ospf_spf_add_parent(v, w, nh, distance);
+
+ /*
+ * Since v is the root the nexthop and
+ * local nexthop are the same.
+ */
+ lnh = vertex_nexthop_new();
+ memcpy(lnh, nh,
+ sizeof(struct vertex_nexthop));
+
+ ospf_spf_add_parent(v, w, nh, lnh,
+ distance);
return 1;
} else
zlog_info(
"%s: could not determine nexthop for link %s",
- __func__, oi->ifp->name);
+ __func__, oi ? oi->ifp->name : "");
} /* end point-to-point link from V to W */
else if (l->m[0].type == LSA_LINK_TYPE_VIRTUALLINK) {
/*
@@ -733,7 +1023,17 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area,
nh = vertex_nexthop_new();
nh->router = vl_data->nexthop.router;
nh->lsa_pos = vl_data->nexthop.lsa_pos;
- ospf_spf_add_parent(v, w, nh, distance);
+
+ /*
+ * Since v is the root the nexthop and
+ * local nexthop are the same.
+ */
+ lnh = vertex_nexthop_new();
+ memcpy(lnh, nh,
+ sizeof(struct vertex_nexthop));
+
+ ospf_spf_add_parent(v, w, nh, lnh,
+ distance);
return 1;
} else
zlog_info(
@@ -747,7 +1047,15 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area,
nh = vertex_nexthop_new();
nh->router.s_addr = 0; /* Nexthop not required */
nh->lsa_pos = lsa_pos;
- ospf_spf_add_parent(v, w, nh, distance);
+
+ /*
+ * Since v is the root the nexthop and
+ * local nexthop are the same.
+ */
+ lnh = vertex_nexthop_new();
+ memcpy(lnh, nh, sizeof(struct vertex_nexthop));
+
+ ospf_spf_add_parent(v, w, nh, lnh, distance);
return 1;
}
} /* end V is the root */
@@ -780,8 +1088,18 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area,
nh = vertex_nexthop_new();
nh->router = l->link_data;
nh->lsa_pos = vp->nexthop->lsa_pos;
+
+ /*
+ * Since v is the root the nexthop and
+ * local nexthop are the same.
+ */
+ lnh = vertex_nexthop_new();
+ memcpy(lnh, nh,
+ sizeof(struct vertex_nexthop));
+
added = 1;
- ospf_spf_add_parent(v, w, nh, distance);
+ ospf_spf_add_parent(v, w, nh, lnh,
+ distance);
}
/*
* Note lack of return is deliberate. See next
@@ -829,12 +1147,154 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area,
for (ALL_LIST_ELEMENTS(v->parents, node, nnode, vp)) {
added = 1;
- ospf_spf_add_parent(v, w, vp->nexthop, distance);
+
+ /*
+ * The nexthop is inherited, but the local nexthop still needs
+ * to be created.
+ */
+ if (l) {
+ lnh = vertex_nexthop_new();
+ lnh->router = l->link_data;
+ lnh->lsa_pos = lsa_pos;
+ } else {
+ lnh = NULL;
+ }
+
+ ospf_spf_add_parent(v, w, vp->nexthop, lnh, distance);
}
return added;
}
+static int ospf_spf_is_protected_resource(struct ospf_area *area,
+ struct router_lsa_link *link,
+ struct lsa_header *lsa)
+{
+ uint8_t *p, *lim;
+ struct router_lsa_link *p_link;
+ struct router_lsa_link *l = NULL;
+ struct in_addr router_id;
+ int link_type;
+
+ if (!area->spf_protected_resource)
+ return 0;
+
+ link_type = link->m[0].type;
+
+ switch (area->spf_protected_resource->type) {
+ case OSPF_TI_LFA_LINK_PROTECTION:
+ p_link = area->spf_protected_resource->link;
+ if (!p_link)
+ return 0;
+
+ /* For P2P: check if the link belongs to the same subnet */
+ if (link_type == LSA_LINK_TYPE_POINTOPOINT
+ && (p_link->link_id.s_addr & p_link->link_data.s_addr)
+ == (link->link_data.s_addr
+ & p_link->link_data.s_addr))
+ return 1;
+
+ /* For stub: check if this the same subnet */
+ if (link_type == LSA_LINK_TYPE_STUB
+ && (p_link->link_id.s_addr == link->link_id.s_addr)
+ && (p_link->link_data.s_addr == link->link_data.s_addr))
+ return 1;
+
+ break;
+ case OSPF_TI_LFA_NODE_PROTECTION:
+ router_id = area->spf_protected_resource->router_id;
+ if (router_id.s_addr == INADDR_ANY)
+ return 0;
+
+ /* For P2P: check if the link leads to the protected node */
+ if (link_type == LSA_LINK_TYPE_POINTOPOINT
+ && link->link_id.s_addr == router_id.s_addr)
+ return 1;
+
+ /* The rest is about stub links! */
+ if (link_type != LSA_LINK_TYPE_STUB)
+ return 0;
+
+ /*
+ * Check if there's a P2P link in the router LSA with the
+ * corresponding link data in the same subnet.
+ */
+
+ p = ((uint8_t *)lsa) + OSPF_LSA_HEADER_SIZE + 4;
+ lim = ((uint8_t *)lsa) + ntohs(lsa->length);
+
+ while (p < lim) {
+ l = (struct router_lsa_link *)p;
+ p += (OSPF_ROUTER_LSA_LINK_SIZE
+ + (l->m[0].tos_count * OSPF_ROUTER_LSA_TOS_SIZE));
+
+ /* We only care about P2P with the proper link id */
+ if ((l->m[0].type != LSA_LINK_TYPE_POINTOPOINT)
+ || (l->link_id.s_addr != router_id.s_addr))
+ continue;
+
+ /* Link data in the subnet given by the link? */
+ if ((link->link_id.s_addr & link->link_data.s_addr)
+ == (l->link_data.s_addr & link->link_data.s_addr))
+ return 1;
+ }
+
+ break;
+ case OSPF_TI_LFA_UNDEFINED_PROTECTION:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * For TI-LFA we need the reverse SPF for Q spaces. The reverse SPF is created
+ * by honoring the weight of the reverse 'edge', e.g. the edge from W to V, and
+ * NOT the weight of the 'edge' from V to W as usual. Hence we need to find the
+ * corresponding link in the LSA of W and extract the particular weight.
+ *
+ * TODO: Only P2P supported by now!
+ */
+static uint16_t get_reverse_distance(struct vertex *v,
+ struct router_lsa_link *l,
+ struct ospf_lsa *w_lsa)
+{
+ uint8_t *p, *lim;
+ struct router_lsa_link *w_link;
+ uint16_t distance = 0;
+
+ assert(w_lsa && w_lsa->data);
+
+ p = ((uint8_t *)w_lsa->data) + OSPF_LSA_HEADER_SIZE + 4;
+ lim = ((uint8_t *)w_lsa->data) + ntohs(w_lsa->data->length);
+
+ while (p < lim) {
+ w_link = (struct router_lsa_link *)p;
+ p += (OSPF_ROUTER_LSA_LINK_SIZE
+ + (w_link->m[0].tos_count * OSPF_ROUTER_LSA_TOS_SIZE));
+
+ /* Only care about P2P with link ID equal to V's router id */
+ if (w_link->m[0].type == LSA_LINK_TYPE_POINTOPOINT
+ && w_link->link_id.s_addr == v->id.s_addr) {
+ distance = ntohs(w_link->m[0].metric);
+ break;
+ }
+ }
+
+ /*
+ * This might happen if the LSA for W is not complete yet. In this
+ * case we take the weight of the 'forward' link from V. When the LSA
+ * for W is completed the reverse SPF is run again anyway.
+ */
+ if (distance == 0)
+ distance = ntohs(l->m[0].metric);
+
+ if (IS_DEBUG_OSPF_EVENT)
+ zlog_debug("%s: reversed distance is %u", __func__, distance);
+
+ return distance;
+}
+
/*
* RFC2328 16.1 (2).
* v is on the SPF tree. Examine the links in v's LSA. Update the list of
@@ -850,6 +1310,7 @@ static void ospf_spf_next(struct vertex *v, struct ospf_area *area,
struct router_lsa_link *l = NULL;
struct in_addr *r;
int type = 0, lsa_pos = -1, lsa_pos_next = 0;
+ uint16_t link_distance;
/*
* If this is a router-LSA, and bit V of the router-LSA (see Section
@@ -892,6 +1353,16 @@ static void ospf_spf_next(struct vertex *v, struct ospf_area *area,
continue;
/*
+ * Don't process TI-LFA protected resources.
+ *
+ * TODO: Replace this by a proper solution, e.g. remove
+ * corresponding links from the LSDB and run the SPF
+ * algo with the stripped-down LSDB.
+ */
+ if (ospf_spf_is_protected_resource(area, l, v->lsa))
+ continue;
+
+ /*
* (b) Otherwise, W is a transit vertex (router or
* transit network). Look up the vertex W's LSA
* (router-LSA or network-LSA) in Area A's link state
@@ -928,8 +1399,19 @@ static void ospf_spf_next(struct vertex *v, struct ospf_area *area,
continue;
}
+ /*
+ * For TI-LFA we might need the reverse SPF.
+ * Currently only works with P2P!
+ */
+ if (type == LSA_LINK_TYPE_POINTOPOINT
+ && area->spf_reversed)
+ link_distance =
+ get_reverse_distance(v, l, w_lsa);
+ else
+ link_distance = ntohs(l->m[0].metric);
+
/* step (d) below */
- distance = v->distance + ntohs(l->m[0].metric);
+ distance = v->distance + link_distance;
} else {
/* In case of V is Network-LSA. */
r = (struct in_addr *)p;
@@ -1069,8 +1551,7 @@ void ospf_spf_print(struct vty *vty, struct vertex *v, int i)
struct vertex_parent *parent;
if (v->type == OSPF_VERTEX_ROUTER) {
- vty_out(vty, "SPF Result: depth %d [R] %pI4\n", i,
- &v->lsa->id);
+ vty_out(vty, "SPF Result: depth %d [R] %pI4\n", i, &v->lsa->id);
} else {
struct network_lsa *lsa = (struct network_lsa *)v->lsa;
vty_out(vty, "SPF Result: depth %d [N] %pI4/%d\n", i,
@@ -1078,9 +1559,11 @@ void ospf_spf_print(struct vty *vty, struct vertex *v, int i)
}
for (ALL_LIST_ELEMENTS_RO(v->parents, nnode, parent)) {
- vty_out(vty, " nexthop %pI4 lsa pos %d\n",
- &parent->nexthop->router,
- parent->nexthop->lsa_pos);
+ vty_out(vty,
+ " nexthop %pI4 lsa pos %d -- local nexthop %pI4 lsa pos %d\n",
+ &parent->nexthop->router, parent->nexthop->lsa_pos,
+ &parent->local_nexthop->router,
+ parent->local_nexthop->lsa_pos);
}
i++;
@@ -1128,7 +1611,9 @@ static void ospf_spf_process_stubs(struct ospf_area *area, struct vertex *v,
p += (OSPF_ROUTER_LSA_LINK_SIZE
+ (l->m[0].tos_count * OSPF_ROUTER_LSA_TOS_SIZE));
- if (l->m[0].type == LSA_LINK_TYPE_STUB)
+ /* Don't process TI-LFA protected resources */
+ if (l->m[0].type == LSA_LINK_TYPE_STUB
+ && !ospf_spf_is_protected_resource(area, l, v->lsa))
ospf_intra_add_stub(rt, l, v, area,
parent_is_root, lsa_pos);
lsa_pos++;
@@ -1190,73 +1675,13 @@ void ospf_spf_cleanup(struct vertex *spf, struct list *vertex_list)
* attached the first level of router vertices attached to the
* root vertex, see ospf_nexthop_calculation.
*/
- ospf_canonical_nexthops_free(spf);
+ if (spf)
+ ospf_canonical_nexthops_free(spf);
/* Free SPF vertices list with deconstructor ospf_vertex_free. */
- list_delete(&vertex_list);
-}
-
-#if 0
-static void
-ospf_rtrs_print (struct route_table *rtrs)
-{
- struct route_node *rn;
- struct list *or_list;
- struct listnode *ln;
- struct listnode *pnode;
- struct ospf_route *or;
- struct ospf_path *path;
- char buf1[BUFSIZ];
- char buf2[BUFSIZ];
-
- if (IS_DEBUG_OSPF_EVENT)
- zlog_debug ("ospf_rtrs_print() start");
-
- for (rn = route_top (rtrs); rn; rn = route_next (rn))
- if ((or_list = rn->info) != NULL)
- for (ALL_LIST_ELEMENTS_RO (or_list, ln, or))
- {
- switch (or->path_type)
- {
- case OSPF_PATH_INTRA_AREA:
- if (IS_DEBUG_OSPF_EVENT)
- zlog_debug ("%s [%d] area: %s",
- inet_ntop (AF_INET, &or->id, buf1, BUFSIZ),
- or->cost, inet_ntop (AF_INET, &or->u.std.area_id,
- buf2, BUFSIZ));
- break;
- case OSPF_PATH_INTER_AREA:
- if (IS_DEBUG_OSPF_EVENT)
- zlog_debug ("%s IA [%d] area: %s",
- inet_ntop (AF_INET, &or->id, buf1, BUFSIZ),
- or->cost, inet_ntop (AF_INET, &or->u.std.area_id,
- buf2, BUFSIZ));
- break;
- default:
- break;
- }
-
- for (ALL_LIST_ELEMENTS_RO (or->paths, pnode, path))
- {
- if (path->nexthop.s_addr == INADDR_ANY)
- {
- if (IS_DEBUG_OSPF_EVENT)
- zlog_debug (" directly attached to %s\r",
- ifindex2ifname (path->ifindex), VRF_DEFAULT);
- }
- else
- {
- if (IS_DEBUG_OSPF_EVENT)
- zlog_debug (" via %pI4, %s\r",
- &path->nexthop,
- ifindex2ifname (path->ifindex), VRF_DEFAULT);
- }
- }
- }
-
- zlog_debug ("ospf_rtrs_print() end");
+ if (vertex_list)
+ list_delete(&vertex_list);
}
-#endif
/* Calculating the shortest-path tree for an area, see RFC2328 16.1. */
void ospf_spf_calculate(struct ospf_area *area, struct ospf_lsa *root_lsa,
@@ -1359,19 +1784,27 @@ void ospf_spf_calculate(struct ospf_area *area, struct ospf_lsa *root_lsa,
if (IS_DEBUG_OSPF_EVENT)
zlog_debug("ospf_spf_calculate: Stop. %zd vertices",
mtype_stats_alloc(MTYPE_OSPF_VERTEX));
+}
+
+void ospf_spf_calculate_area(struct ospf *ospf, struct ospf_area *area,
+ struct route_table *new_table,
+ struct route_table *new_rtrs)
+{
+ ospf_spf_calculate(area, area->router_lsa_self, new_table, new_rtrs,
+ false, true);
- /* If this is a dry run then keep the SPF data in place */
- if (!area->spf_dry_run)
- ospf_spf_cleanup(area->spf, area->spf_vertex_list);
+ if (ospf->ti_lfa_enabled)
+ ospf_ti_lfa_compute(area, new_table,
+ ospf->ti_lfa_protection_type);
+
+ ospf_spf_cleanup(area->spf, area->spf_vertex_list);
}
-int ospf_spf_calculate_areas(struct ospf *ospf, struct route_table *new_table,
- struct route_table *new_rtrs, bool is_dry_run,
- bool is_root_node)
+void ospf_spf_calculate_areas(struct ospf *ospf, struct route_table *new_table,
+ struct route_table *new_rtrs)
{
struct ospf_area *area;
struct listnode *node, *nnode;
- int areas_processed = 0;
/* Calculate SPF for each area. */
for (ALL_LIST_ELEMENTS(ospf->areas, node, nnode, area)) {
@@ -1380,20 +1813,13 @@ int ospf_spf_calculate_areas(struct ospf *ospf, struct route_table *new_table,
if (ospf->backbone && ospf->backbone == area)
continue;
- ospf_spf_calculate(area, area->router_lsa_self, new_table,
- new_rtrs, is_dry_run, is_root_node);
- areas_processed++;
+ ospf_spf_calculate_area(ospf, area, new_table, new_rtrs);
}
/* SPF for backbone, if required */
- if (ospf->backbone) {
- area = ospf->backbone;
- ospf_spf_calculate(area, area->router_lsa_self, new_table,
- new_rtrs, is_dry_run, is_root_node);
- areas_processed++;
- }
-
- return areas_processed;
+ if (ospf->backbone)
+ ospf_spf_calculate_area(ospf, ospf->backbone, new_table,
+ new_rtrs);
}
/* Worker for SPF calculation scheduler. */
@@ -1402,7 +1828,6 @@ static int ospf_spf_calculate_schedule_worker(struct thread *thread)
struct ospf *ospf = THREAD_ARG(thread);
struct route_table *new_table, *new_rtrs;
struct timeval start_time, spf_start_time;
- int areas_processed;
unsigned long ia_time, prune_time, rt_time;
unsigned long abr_time, total_spf_time, spf_time;
char rbuf[32]; /* reason_buf */
@@ -1418,8 +1843,7 @@ static int ospf_spf_calculate_schedule_worker(struct thread *thread)
monotime(&spf_start_time);
new_table = route_table_init(); /* routing table */
new_rtrs = route_table_init(); /* ABR/ASBR routing table */
- areas_processed = ospf_spf_calculate_areas(ospf, new_table, new_rtrs,
- false, true);
+ ospf_spf_calculate_areas(ospf, new_table, new_rtrs);
spf_time = monotime_since(&spf_start_time, NULL);
ospf_vl_shut_unapproved(ospf);
@@ -1512,7 +1936,7 @@ static int ospf_spf_calculate_schedule_worker(struct thread *thread)
zlog_info(" RouteInstall: %ld", rt_time);
if (IS_OSPF_ABR(ospf))
zlog_info(" ABR: %ld (%d areas)",
- abr_time, areas_processed);
+ abr_time, ospf->areas->count);
zlog_info("Reason(s) for SPF: %s", rbuf);
}
diff --git a/ospfd/ospf_spf.h b/ospfd/ospf_spf.h
index 2dc0f8b886..66555be4b7 100644
--- a/ospfd/ospf_spf.h
+++ b/ospfd/ospf_spf.h
@@ -47,15 +47,15 @@ struct vertex {
struct list *children; /* list of children in SPF tree*/
};
-/* A nexthop taken on the root node to get to this (parent) vertex */
struct vertex_nexthop {
struct in_addr router; /* router address to send to */
int lsa_pos; /* LSA position for resolving the interface */
};
struct vertex_parent {
- struct vertex_nexthop *nexthop; /* nexthop address for this parent */
- struct vertex *parent; /* parent vertex */
+ struct vertex_nexthop *nexthop; /* nexthop taken on the root node */
+ struct vertex_nexthop *local_nexthop; /* local nexthop of the parent */
+ struct vertex *parent; /* parent vertex */
int backlink; /* index back to parent for router-lsa's */
};
@@ -77,12 +77,25 @@ extern void ospf_spf_calculate(struct ospf_area *area,
struct route_table *new_table,
struct route_table *new_rtrs, bool is_dry_run,
bool is_root_node);
-extern int ospf_spf_calculate_areas(struct ospf *ospf,
+extern void ospf_spf_calculate_area(struct ospf *ospf, struct ospf_area *area,
struct route_table *new_table,
- struct route_table *new_rtrs,
- bool is_dry_run, bool is_root_node);
+ struct route_table *new_rtrs);
+extern void ospf_spf_calculate_areas(struct ospf *ospf,
+ struct route_table *new_table,
+ struct route_table *new_rtrs);
extern void ospf_rtrs_free(struct route_table *);
extern void ospf_spf_cleanup(struct vertex *spf, struct list *vertex_list);
+extern void ospf_spf_copy(struct vertex *vertex, struct list *vertex_list);
+extern void ospf_spf_remove_resource(struct vertex *vertex,
+ struct list *vertex_list,
+ struct protected_resource *resource);
+extern struct vertex *ospf_spf_vertex_find(struct in_addr id,
+ struct list *vertex_list);
+extern struct vertex *ospf_spf_vertex_by_nexthop(struct vertex *root,
+ struct in_addr *nexthop);
+extern struct vertex_parent *ospf_spf_vertex_parent_find(struct in_addr id,
+ struct vertex *vertex);
+extern int vertex_parent_cmp(void *aa, void *bb);
extern void ospf_spf_print(struct vty *vty, struct vertex *v, int i);
diff --git a/ospfd/ospf_sr.c b/ospfd/ospf_sr.c
index e2218957d2..7b2d794214 100644
--- a/ospfd/ospf_sr.c
+++ b/ospfd/ospf_sr.c
@@ -159,6 +159,16 @@ static struct sr_node *sr_node_new(struct in_addr *rid)
return new;
}
+/* Exposed publicly for testing purposes only */
+struct sr_node *ospf_sr_node_create(struct in_addr *rid)
+{
+ struct sr_node *srn;
+
+ srn = hash_get(OspfSR.neighbors, (void *)rid, (void *)sr_node_new);
+
+ return srn;
+}
+
/* Delete Segment Routing node */
static void sr_node_del(struct sr_node *srn)
{
@@ -588,11 +598,6 @@ int ospf_sr_init(void)
if (OspfSR.neighbors == NULL)
return rc;
- /* Initialize Route Table for prefix */
- OspfSR.prefix = route_table_init();
- if (OspfSR.prefix == NULL)
- return rc;
-
/* Register Segment Routing VTY command */
ospf_sr_register_vty();
@@ -616,9 +621,6 @@ void ospf_sr_term(void)
if (OspfSR.neighbors)
hash_free(OspfSR.neighbors);
- /* Clear Prefix Table */
- if (OspfSR.prefix)
- route_table_finish(OspfSR.prefix);
}
/*
@@ -653,6 +655,56 @@ static mpls_label_t index2label(uint32_t index, struct sr_block srgb)
return label;
}
+/* Get the prefix sid for a specific router id */
+mpls_label_t ospf_sr_get_prefix_sid_by_id(struct in_addr *id)
+{
+ struct sr_node *srn;
+ struct sr_prefix *srp;
+ mpls_label_t label;
+
+ srn = (struct sr_node *)hash_lookup(OspfSR.neighbors, id);
+
+ if (srn) {
+ /*
+ * TODO: Here we assume that the SRGBs are the same,
+ * and that the node's prefix SID is at the head of
+ * the list, probably needs tweaking.
+ */
+ srp = listnode_head(srn->ext_prefix);
+ label = index2label(srp->sid, srn->srgb);
+ } else {
+ label = MPLS_INVALID_LABEL;
+ }
+
+ return label;
+}
+
+/* Get the adjacency sid for a specific 'root' id and 'neighbor' id */
+mpls_label_t ospf_sr_get_adj_sid_by_id(struct in_addr *root_id,
+ struct in_addr *neighbor_id)
+{
+ struct sr_node *srn;
+ struct sr_link *srl;
+ mpls_label_t label;
+ struct listnode *node;
+
+ srn = (struct sr_node *)hash_lookup(OspfSR.neighbors, root_id);
+
+ label = MPLS_INVALID_LABEL;
+
+ if (srn) {
+ for (ALL_LIST_ELEMENTS_RO(srn->ext_link, node, srl)) {
+ if (srl->type == ADJ_SID
+ && srl->remote_id.s_addr == neighbor_id->s_addr) {
+ label = srl->sid[0];
+ break;
+ }
+ }
+ }
+
+ return label;
+}
+
/* Get neighbor full structure from address */
static struct ospf_neighbor *get_neighbor_by_addr(struct ospf *top,
struct in_addr addr)
@@ -853,8 +905,13 @@ static int compute_prefix_nhlfe(struct sr_prefix *srp)
* be received before corresponding Router Information LSA
*/
if (srnext == NULL || srnext->srgb.lower_bound == 0
- || srnext->srgb.range_size == 0)
+ || srnext->srgb.range_size == 0) {
+ osr_debug(
+ " |- SR-Node %pI4 not ready. Stop process",
+ &srnext->adv_router);
+ path->srni.label_out = MPLS_INVALID_LABEL;
continue;
+ }
osr_debug(" |- Found SRGB %u/%u for next hop SR-Node %pI4",
srnext->srgb.range_size, srnext->srgb.lower_bound,
@@ -1213,7 +1270,7 @@ static void update_in_nhlfe(struct hash_bucket *bucket, void *args)
/*
* When SRGB has changed, update NHLFE Output Label for all Extended Prefix
- * with SID index which use the given SR-Node as nexthop though hash_iterate()
+ * with SID index which use the given SR-Node as nexthop through hash_iterate()
*/
static void update_out_nhlfe(struct hash_bucket *bucket, void *args)
{
@@ -1223,21 +1280,29 @@ static void update_out_nhlfe(struct hash_bucket *bucket, void *args)
struct sr_prefix *srp;
struct ospf_path *path;
+ /* Skip Self SR-Node */
+ if (srn == OspfSR.self)
+ return;
+
+ osr_debug("SR (%s): Update Out NHLFE for neighbor SR-Node %pI4",
+ __func__, &srn->adv_router);
+
for (ALL_LIST_ELEMENTS_RO(srn->ext_prefix, node, srp)) {
- /* Process only SID Index with valid route */
+ /* Skip prefixes that do not yet have a valid route */
if (srp->route == NULL)
continue;
for (ALL_LIST_ELEMENTS_RO(srp->route->paths, pnode, path)) {
- /* Process only SID Index for next hop without PHP */
- if ((path->srni.nexthop == srp->srn)
- && (!CHECK_FLAG(srp->flags,
- EXT_SUBTLV_PREFIX_SID_NPFLG)))
+ /* Skip paths whose nexthop is not this neighbor SR-Node */
+ if (path->srni.nexthop != srnext)
continue;
- path->srni.label_out =
- index2label(srp->sid, srnext->srgb);
- ospf_zebra_update_prefix_sid(srp);
+
+ /* Compute new Output Label */
+ path->srni.label_out = sr_prefix_out_label(srp, srnext);
}
+
+ /* Finally update MPLS table */
+ ospf_zebra_update_prefix_sid(srp);
}
}
@@ -1378,11 +1443,6 @@ void ospf_sr_ri_lsa_update(struct ospf_lsa *lsa)
srn->srlb.lower_bound = GET_LABEL(ntohl(ri_srlb->lower.value));
}
- osr_debug(" |- Update SR-Node[%pI4], SRGB[%u/%u], SRLB[%u/%u], Algo[%u], MSD[%u]",
- &srn->adv_router, srn->srgb.lower_bound, srn->srgb.range_size,
- srn->srlb.lower_bound, srn->srlb.range_size, srn->algo[0],
- srn->msd);
-
/* Check if SRGB has changed */
if ((srn->srgb.range_size == srgb.range_size)
&& (srn->srgb.lower_bound == srgb.lower_bound))
@@ -1392,6 +1452,11 @@ void ospf_sr_ri_lsa_update(struct ospf_lsa *lsa)
srn->srgb.range_size = srgb.range_size;
srn->srgb.lower_bound = srgb.lower_bound;
+ osr_debug(" |- Update SR-Node[%pI4], SRGB[%u/%u], SRLB[%u/%u], Algo[%u], MSD[%u]",
+ &srn->adv_router, srn->srgb.lower_bound, srn->srgb.range_size,
+ srn->srlb.lower_bound, srn->srlb.range_size, srn->algo[0],
+ srn->msd);
+
/* ... and NHLFE if it is a neighbor SR node */
if (srn->neighbor == OspfSR.self)
hash_iterate(OspfSR.neighbors, update_out_nhlfe, srn);
@@ -1562,6 +1627,7 @@ void ospf_sr_ext_itf_add(struct ext_itf *exti)
srl->itf_addr = exti->link.link_data;
srl->instance =
SET_OPAQUE_LSID(OPAQUE_TYPE_EXTENDED_LINK_LSA, exti->instance);
+ srl->remote_id = exti->link.link_id;
switch (exti->stype) {
case ADJ_SID:
srl->type = ADJ_SID;
diff --git a/ospfd/ospf_sr.h b/ospfd/ospf_sr.h
index 222675944d..ce13457484 100644
--- a/ospfd/ospf_sr.h
+++ b/ospfd/ospf_sr.h
@@ -233,9 +233,6 @@ struct ospf_sr_db {
/* List of neighbour SR nodes */
struct hash *neighbors;
- /* List of SR prefix */
- struct route_table *prefix;
-
/* Local SR info announced in Router Info LSA */
/* Algorithms supported by the node */
@@ -290,6 +287,9 @@ struct sr_link {
/* 24-bit Opaque-ID field value according to RFC 7684 specification */
uint32_t instance;
+ /* Addressed (remote) router id */
+ struct in_addr remote_id;
+
/* Interface address */
struct in_addr itf_addr;
@@ -361,4 +361,11 @@ extern void ospf_sr_update_local_prefix(struct interface *ifp,
struct prefix *p);
/* Segment Routing re-routing function */
extern void ospf_sr_update_task(struct ospf *ospf);
+
+/* Support for TI-LFA */
+extern mpls_label_t ospf_sr_get_prefix_sid_by_id(struct in_addr *id);
+extern mpls_label_t ospf_sr_get_adj_sid_by_id(struct in_addr *root_id,
+ struct in_addr *neighbor_id);
+extern struct sr_node *ospf_sr_node_create(struct in_addr *rid);
+
#endif /* _FRR_OSPF_SR_H */
diff --git a/ospfd/ospf_ti_lfa.c b/ospfd/ospf_ti_lfa.c
new file mode 100644
index 0000000000..4a0186bfb9
--- /dev/null
+++ b/ospfd/ospf_ti_lfa.c
@@ -0,0 +1,1114 @@
+/*
+ * OSPF TI-LFA
+ * Copyright (C) 2020 NetDEF, Inc.
+ * Sascha Kattelmann
+ *
+ * This file is part of FRR.
+ *
+ * FRR is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * FRR is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#include "prefix.h"
+#include "table.h"
+#include "printfrr.h"
+
+#include "ospfd/ospfd.h"
+#include "ospfd/ospf_asbr.h"
+#include "ospfd/ospf_lsa.h"
+#include "ospfd/ospf_spf.h"
+#include "ospfd/ospf_sr.h"
+#include "ospfd/ospf_route.h"
+#include "ospfd/ospf_ti_lfa.h"
+#include "ospfd/ospf_dump.h"
+
+
+DECLARE_RBTREE_UNIQ(p_spaces, struct p_space, p_spaces_item,
+ p_spaces_compare_func)
+DECLARE_RBTREE_UNIQ(q_spaces, struct q_space, q_spaces_item,
+ q_spaces_compare_func)
+
+static void
+ospf_ti_lfa_generate_p_space(struct ospf_area *area, struct vertex *child,
+ struct protected_resource *protected_resource,
+ bool recursive, struct list *pc_path);
+
+void ospf_print_protected_resource(
+ struct protected_resource *protected_resource, char *buf)
+{
+ struct router_lsa_link *link;
+
+ switch (protected_resource->type) {
+ case OSPF_TI_LFA_LINK_PROTECTION:
+ link = protected_resource->link;
+ snprintfrr(buf, PROTECTED_RESOURCE_STRLEN,
+ "protected link: %pI4 %pI4", &link->link_id,
+ &link->link_data);
+ break;
+ case OSPF_TI_LFA_NODE_PROTECTION:
+ snprintfrr(buf, PROTECTED_RESOURCE_STRLEN,
+ "protected node: %pI4",
+ &protected_resource->router_id);
+ break;
+ case OSPF_TI_LFA_UNDEFINED_PROTECTION:
+ snprintfrr(buf, PROTECTED_RESOURCE_STRLEN,
+ "undefined protected resource");
+ break;
+ }
+}
+
+static enum ospf_ti_lfa_p_q_space_adjacency
+ospf_ti_lfa_find_p_node(struct vertex *pc_node, struct p_space *p_space,
+ struct q_space *q_space)
+{
+ struct listnode *curr_node;
+ struct vertex *p_node = NULL, *pc_node_parent, *p_node_pc_parent;
+ struct vertex_parent *pc_vertex_parent;
+
+ curr_node = listnode_lookup(q_space->pc_path, pc_node);
+ pc_node_parent = listgetdata(curr_node->next);
+
+ q_space->p_node_info->type = OSPF_TI_LFA_UNDEFINED_NODE;
+
+ p_node = ospf_spf_vertex_find(pc_node_parent->id, p_space->vertex_list);
+
+ if (p_node) {
+ q_space->p_node_info->node = p_node;
+ q_space->p_node_info->type = OSPF_TI_LFA_P_NODE;
+
+ if (curr_node->next->next) {
+ p_node_pc_parent = listgetdata(curr_node->next->next);
+ pc_vertex_parent = ospf_spf_vertex_parent_find(
+ p_node_pc_parent->id, pc_node_parent);
+ q_space->p_node_info->nexthop =
+ pc_vertex_parent->nexthop->router;
+ } else {
+ /*
+ * It can happen that the P node is the root node itself
+ * (hence there can be no parents). In this case we
+ * don't need to set a nexthop.
+ */
+ q_space->p_node_info->nexthop.s_addr = INADDR_ANY;
+ }
+
+ return OSPF_TI_LFA_P_Q_SPACE_ADJACENT;
+ }
+
+ ospf_ti_lfa_find_p_node(pc_node_parent, p_space, q_space);
+ return OSPF_TI_LFA_P_Q_SPACE_NON_ADJACENT;
+}
+
+static void ospf_ti_lfa_find_q_node(struct vertex *pc_node,
+ struct p_space *p_space,
+ struct q_space *q_space)
+{
+ struct listnode *curr_node, *next_node;
+ struct vertex *p_node, *q_node, *q_space_parent = NULL, *pc_node_parent;
+ struct vertex_parent *pc_vertex_parent;
+
+ curr_node = listnode_lookup(q_space->pc_path, pc_node);
+ next_node = curr_node->next;
+ pc_node_parent = listgetdata(next_node);
+ pc_vertex_parent =
+ ospf_spf_vertex_parent_find(pc_node_parent->id, pc_node);
+
+ p_node = ospf_spf_vertex_find(pc_node->id, p_space->vertex_list);
+ q_node = ospf_spf_vertex_find(pc_node->id, q_space->vertex_list);
+
+ /* The Q node is always present. */
+ assert(q_node);
+
+ q_space->q_node_info->type = OSPF_TI_LFA_UNDEFINED_NODE;
+
+ if (p_node && q_node) {
+ q_space->q_node_info->node = pc_node;
+ q_space->q_node_info->type = OSPF_TI_LFA_PQ_NODE;
+ q_space->q_node_info->nexthop =
+ pc_vertex_parent->nexthop->router;
+ return;
+ }
+
+ /*
+ * Note that the Q space has the 'reverse' direction of the PC
+ * SPF. Hence compare PC SPF parent to Q space children.
+ */
+ q_space_parent =
+ ospf_spf_vertex_find(pc_node_parent->id, q_node->children);
+
+ /*
+ * If the Q space parent doesn't exist, we have reached the border
+ * to the P space and have therefore found our Q node.
+ */
+ if (!q_space_parent) {
+ q_space->q_node_info->node = pc_node;
+ q_space->q_node_info->type = OSPF_TI_LFA_Q_NODE;
+ q_space->q_node_info->nexthop =
+ pc_vertex_parent->nexthop->router;
+ return;
+ }
+
+ return ospf_ti_lfa_find_q_node(pc_node_parent, p_space, q_space);
+}
+
+static void ospf_ti_lfa_append_label_stack(struct mpls_label_stack *label_stack,
+ mpls_label_t labels[],
+ uint32_t num_labels)
+{
+ int i, offset, limit;
+
+ limit = label_stack->num_labels + num_labels;
+ offset = label_stack->num_labels;
+
+ for (i = label_stack->num_labels; i < limit; i++) {
+ label_stack->label[i] = labels[i - offset];
+ label_stack->num_labels++;
+ }
+}
+
+
+static struct mpls_label_stack *
+ospf_ti_lfa_create_label_stack(mpls_label_t labels[], uint32_t num_labels)
+{
+ struct mpls_label_stack *label_stack;
+ uint32_t i;
+
+ /* Sanity check */
+ for (i = 0; i < num_labels; i++) {
+ if (labels[i] == MPLS_INVALID_LABEL)
+ return NULL;
+ }
+
+ label_stack = XCALLOC(MTYPE_OSPF_Q_SPACE,
+ sizeof(struct mpls_label_stack)
+ + MPLS_MAX_LABELS * sizeof(mpls_label_t));
+ label_stack->num_labels = num_labels;
+
+ for (i = 0; i < num_labels; i++)
+ label_stack->label[i] = labels[i];
+
+ return label_stack;
+}
+
+static struct list *
+ospf_ti_lfa_map_path_to_pc_vertices(struct list *path,
+ struct list *pc_vertex_list)
+{
+ struct listnode *node;
+ struct vertex *vertex, *pc_vertex;
+ struct list *pc_path;
+
+ pc_path = list_new();
+
+ for (ALL_LIST_ELEMENTS_RO(path, node, vertex)) {
+ pc_vertex = ospf_spf_vertex_find(vertex->id, pc_vertex_list);
+ listnode_add(pc_path, pc_vertex);
+ }
+
+ return pc_path;
+}
+
+static struct list *ospf_ti_lfa_cut_out_pc_path(struct list *pc_vertex_list,
+ struct list *pc_path,
+ struct vertex *p_node,
+ struct vertex *q_node)
+{
+ struct list *inner_pc_path;
+ struct vertex *current_vertex;
+ struct listnode *current_listnode;
+
+ inner_pc_path = list_new();
+ current_vertex = ospf_spf_vertex_find(q_node->id, pc_vertex_list);
+ current_listnode = listnode_lookup(pc_path, current_vertex);
+
+ /* Note that the post-convergence paths are reversed. */
+ for (;;) {
+ current_vertex = listgetdata(current_listnode);
+ listnode_add(inner_pc_path, current_vertex);
+
+ if (current_vertex->id.s_addr == p_node->id.s_addr)
+ break;
+
+ current_listnode = current_listnode->next;
+ }
+
+ return inner_pc_path;
+}
+
+static void ospf_ti_lfa_generate_inner_label_stack(
+ struct ospf_area *area, struct p_space *p_space,
+ struct q_space *q_space,
+ struct ospf_ti_lfa_inner_backup_path_info *inner_backup_path_info)
+{
+ struct route_table *new_table, *new_rtrs;
+ struct vertex *q_node;
+ struct vertex *start_vertex, *end_vertex;
+ struct vertex_parent *vertex_parent;
+ struct listnode *pc_p_node, *pc_q_node;
+ struct vertex *spf_orig;
+ struct list *vertex_list_orig;
+ struct p_spaces_head *p_spaces_orig;
+ struct p_space *inner_p_space;
+ struct q_space *inner_q_space;
+ struct ospf_ti_lfa_node_info *p_node_info, *q_node_info;
+ struct protected_resource *protected_resource;
+ struct list *inner_pc_path;
+ mpls_label_t start_label, end_label;
+
+ p_node_info = q_space->p_node_info;
+ q_node_info = q_space->q_node_info;
+ protected_resource = p_space->protected_resource;
+
+ start_vertex = p_node_info->node;
+ end_vertex = q_node_info->node;
+
+ /*
+ * It can happen that the P node and/or the Q node are the root or
+ * the destination, therefore we need to force one step forward (resp.
+ * backward) using an Adjacency-SID.
+ */
+ start_label = MPLS_INVALID_LABEL;
+ end_label = MPLS_INVALID_LABEL;
+ if (p_node_info->node->id.s_addr == p_space->root->id.s_addr) {
+ pc_p_node = listnode_lookup(q_space->pc_path, p_space->pc_spf);
+ start_vertex = listgetdata(pc_p_node->prev);
+ start_label = ospf_sr_get_adj_sid_by_id(&p_node_info->node->id,
+ &start_vertex->id);
+ }
+ if (q_node_info->node->id.s_addr == q_space->root->id.s_addr) {
+ pc_q_node = listnode_lookup(q_space->pc_path,
+ listnode_head(q_space->pc_path));
+ end_vertex = listgetdata(pc_q_node->next);
+ end_label = ospf_sr_get_adj_sid_by_id(&end_vertex->id,
+ &q_node_info->node->id);
+ }
+
+ /* Corner case: inner path is just one node */
+ if (start_vertex->id.s_addr == end_vertex->id.s_addr) {
+ inner_backup_path_info->label_stack =
+ ospf_ti_lfa_create_label_stack(&start_label, 1);
+ inner_backup_path_info->q_node_info.node = end_vertex;
+ inner_backup_path_info->q_node_info.type = OSPF_TI_LFA_PQ_NODE;
+ inner_backup_path_info->p_node_info.type =
+ OSPF_TI_LFA_UNDEFINED_NODE;
+ vertex_parent = ospf_spf_vertex_parent_find(p_space->root->id,
+ end_vertex);
+ inner_backup_path_info->p_node_info.nexthop =
+ vertex_parent->nexthop->router;
+ return;
+ }
+
+ inner_pc_path = ospf_ti_lfa_cut_out_pc_path(p_space->pc_vertex_list,
+ q_space->pc_path,
+ start_vertex, end_vertex);
+
+ new_table = route_table_init();
+ new_rtrs = route_table_init();
+
+ /* Copy the current state ... */
+ spf_orig = area->spf;
+ vertex_list_orig = area->spf_vertex_list;
+ p_spaces_orig = area->p_spaces;
+
+ area->p_spaces =
+ XCALLOC(MTYPE_OSPF_P_SPACE, sizeof(struct p_spaces_head));
+
+ /* dry run true, root node false */
+ ospf_spf_calculate(area, start_vertex->lsa_p, new_table, new_rtrs, true,
+ false);
+
+ q_node = ospf_spf_vertex_find(end_vertex->id, area->spf_vertex_list);
+
+ ospf_ti_lfa_generate_p_space(area, q_node, protected_resource, false,
+ inner_pc_path);
+
+ /* There's just one P and Q space */
+ inner_p_space = p_spaces_pop(area->p_spaces);
+ inner_q_space = q_spaces_pop(inner_p_space->q_spaces);
+
+ /* Copy over inner backup path information from the inner q_space */
+
+ /* In case the outer P node is also the root of the P space */
+ if (start_label != MPLS_INVALID_LABEL) {
+ inner_backup_path_info->label_stack =
+ ospf_ti_lfa_create_label_stack(&start_label, 1);
+ ospf_ti_lfa_append_label_stack(
+ inner_backup_path_info->label_stack,
+ inner_q_space->label_stack->label,
+ inner_q_space->label_stack->num_labels);
+ inner_backup_path_info->p_node_info.node = start_vertex;
+ inner_backup_path_info->p_node_info.type = OSPF_TI_LFA_P_NODE;
+ vertex_parent = ospf_spf_vertex_parent_find(p_space->root->id,
+ start_vertex);
+ inner_backup_path_info->p_node_info.nexthop =
+ vertex_parent->nexthop->router;
+ } else {
+ memcpy(inner_backup_path_info->label_stack,
+ inner_q_space->label_stack,
+ sizeof(struct mpls_label_stack)
+ + sizeof(mpls_label_t)
+ * inner_q_space->label_stack
+ ->num_labels);
+ memcpy(&inner_backup_path_info->p_node_info,
+ inner_q_space->p_node_info,
+ sizeof(struct ospf_ti_lfa_node_info));
+ }
+
+ /* In case the outer Q node is also the root of the Q space */
+ if (end_label != MPLS_INVALID_LABEL) {
+ inner_backup_path_info->q_node_info.node = end_vertex;
+ inner_backup_path_info->q_node_info.type = OSPF_TI_LFA_Q_NODE;
+ } else {
+ memcpy(&inner_backup_path_info->q_node_info,
+ inner_q_space->q_node_info,
+ sizeof(struct ospf_ti_lfa_node_info));
+ }
+
+ /* Cleanup */
+ ospf_ti_lfa_free_p_spaces(area);
+ ospf_spf_cleanup(area->spf, area->spf_vertex_list);
+
+ /* ... and copy the current state back. */
+ area->spf = spf_orig;
+ area->spf_vertex_list = vertex_list_orig;
+ area->p_spaces = p_spaces_orig;
+}
+
+static void ospf_ti_lfa_generate_label_stack(struct ospf_area *area,
+ struct p_space *p_space,
+ struct q_space *q_space)
+{
+ enum ospf_ti_lfa_p_q_space_adjacency adjacency_result;
+ mpls_label_t labels[MPLS_MAX_LABELS];
+ struct vertex *pc_node;
+ struct ospf_ti_lfa_inner_backup_path_info inner_backup_path_info;
+
+ if (IS_DEBUG_OSPF_TI_LFA)
+ zlog_debug(
+ "%s: Generating Label stack for src %pI4 and dest %pI4.",
+ __func__, &p_space->root->id, &q_space->root->id);
+
+ pc_node = listnode_head(q_space->pc_path);
+
+ if (!pc_node) {
+ if (IS_DEBUG_OSPF_TI_LFA)
+ zlog_debug(
+ "%s: There seems to be no post convergence path (yet).",
+ __func__);
+ return;
+ }
+
+ ospf_ti_lfa_find_q_node(pc_node, p_space, q_space);
+ if (q_space->q_node_info->type == OSPF_TI_LFA_UNDEFINED_NODE) {
+ if (IS_DEBUG_OSPF_TI_LFA)
+ zlog_debug("%s: Q node not found!", __func__);
+ return;
+ }
+
+ /* Found a PQ node? Then we are done here. */
+ if (q_space->q_node_info->type == OSPF_TI_LFA_PQ_NODE) {
+ /*
+ * If the PQ node is a child of the root, then we can use an
+ * adjacency SID instead of a prefix SID for the backup path.
+ */
+ if (ospf_spf_vertex_parent_find(p_space->root->id,
+ q_space->q_node_info->node))
+ labels[0] = ospf_sr_get_adj_sid_by_id(
+ &p_space->root->id,
+ &q_space->q_node_info->node->id);
+ else
+ labels[0] = ospf_sr_get_prefix_sid_by_id(
+ &q_space->q_node_info->node->id);
+
+ q_space->label_stack =
+ ospf_ti_lfa_create_label_stack(labels, 1);
+ q_space->nexthop = q_space->q_node_info->nexthop;
+
+ return;
+ }
+
+ /* Otherwise find a (hopefully adjacent) P node. */
+ pc_node = ospf_spf_vertex_find(q_space->q_node_info->node->id,
+ p_space->pc_vertex_list);
+ adjacency_result = ospf_ti_lfa_find_p_node(pc_node, p_space, q_space);
+
+ if (q_space->p_node_info->type == OSPF_TI_LFA_UNDEFINED_NODE) {
+ if (IS_DEBUG_OSPF_TI_LFA)
+ zlog_debug("%s: P node not found!", __func__);
+ return;
+ }
+
+ /*
+ * This should be the regular case: P and Q space are adjacent or even
+ * overlapping. This is guaranteed for link protection when used with
+ * symmetric weights.
+ */
+ if (adjacency_result == OSPF_TI_LFA_P_Q_SPACE_ADJACENT) {
+ /*
+ * It can happen that the P node is the root itself, therefore
+ * we don't need a label for it. So just one adjacency SID for
+ * the Q node.
+ */
+ if (q_space->p_node_info->node->id.s_addr
+ == p_space->root->id.s_addr) {
+ labels[0] = ospf_sr_get_adj_sid_by_id(
+ &p_space->root->id,
+ &q_space->q_node_info->node->id);
+ q_space->label_stack =
+ ospf_ti_lfa_create_label_stack(labels, 1);
+ q_space->nexthop = q_space->q_node_info->nexthop;
+ return;
+ }
+
+ /*
+ * Otherwise we have a P and also a Q node (which are adjacent).
+ *
+ * It can happen that the P node is a child of the root,
+ * therefore we might just need the adjacency SID for the P node
+ * instead of the prefix SID. For the Q node always take the
+ * adjacency SID.
+ */
+ if (ospf_spf_vertex_parent_find(p_space->root->id,
+ q_space->p_node_info->node))
+ labels[0] = ospf_sr_get_adj_sid_by_id(
+ &p_space->root->id,
+ &q_space->p_node_info->node->id);
+ else
+ labels[0] = ospf_sr_get_prefix_sid_by_id(
+ &q_space->p_node_info->node->id);
+
+ labels[1] = ospf_sr_get_adj_sid_by_id(
+ &q_space->p_node_info->node->id,
+ &q_space->q_node_info->node->id);
+
+ q_space->label_stack =
+ ospf_ti_lfa_create_label_stack(labels, 2);
+ q_space->nexthop = q_space->p_node_info->nexthop;
+
+ } else {
+ /*
+ * It can happen that the P and Q space are not adjacent when
+ * e.g. node protection or asymmetric weights are used. In this
+ * case the found P and Q nodes are used as a reference for
+ * another run of the algorithm!
+ *
+ * After having found the inner label stack it is stitched
+ * together with the outer labels.
+ */
+ inner_backup_path_info.label_stack = XCALLOC(
+ MTYPE_OSPF_PATH,
+ sizeof(struct mpls_label_stack)
+ + sizeof(mpls_label_t) * MPLS_MAX_LABELS);
+ ospf_ti_lfa_generate_inner_label_stack(area, p_space, q_space,
+ &inner_backup_path_info);
+
+ /*
+ * First stitch together the outer P node label with the inner
+ * label stack.
+ */
+ if (q_space->p_node_info->node->id.s_addr
+ == p_space->root->id.s_addr) {
+ /*
+ * It can happen that the P node is the root itself,
+ * therefore we don't need a label for it. Just take
+ * the inner label stack first.
+ */
+ q_space->label_stack = ospf_ti_lfa_create_label_stack(
+ inner_backup_path_info.label_stack->label,
+ inner_backup_path_info.label_stack->num_labels);
+
+ /* Use the inner P or Q node for the nexthop */
+ if (inner_backup_path_info.p_node_info.type
+ != OSPF_TI_LFA_UNDEFINED_NODE)
+ q_space->nexthop = inner_backup_path_info
+ .p_node_info.nexthop;
+ else
+ q_space->nexthop = inner_backup_path_info
+ .q_node_info.nexthop;
+
+ } else if (ospf_spf_vertex_parent_find(
+ p_space->root->id,
+ q_space->p_node_info->node)) {
+ /*
+ * It can happen that the outer P node is a child of
+ * the root, therefore we might just need the
+ * adjacency SID for the outer P node instead of the
+ * prefix SID. Then just append the inner label stack.
+ */
+ labels[0] = ospf_sr_get_adj_sid_by_id(
+ &p_space->root->id,
+ &q_space->p_node_info->node->id);
+ q_space->label_stack =
+ ospf_ti_lfa_create_label_stack(labels, 1);
+ ospf_ti_lfa_append_label_stack(
+ q_space->label_stack,
+ inner_backup_path_info.label_stack->label,
+ inner_backup_path_info.label_stack->num_labels);
+ q_space->nexthop = q_space->p_node_info->nexthop;
+ } else {
+ /* The outer P node needs a Prefix-SID here */
+ labels[0] = ospf_sr_get_prefix_sid_by_id(
+ &q_space->p_node_info->node->id);
+ q_space->label_stack =
+ ospf_ti_lfa_create_label_stack(labels, 1);
+ ospf_ti_lfa_append_label_stack(
+ q_space->label_stack,
+ inner_backup_path_info.label_stack->label,
+ inner_backup_path_info.label_stack->num_labels);
+ q_space->nexthop = q_space->p_node_info->nexthop;
+ }
+
+ /* Now the outer Q node needs to be considered */
+ if (ospf_spf_vertex_parent_find(
+ inner_backup_path_info.q_node_info.node->id,
+ q_space->q_node_info->node)) {
+ /*
+ * The outer Q node can be a child of the inner Q node,
+ * hence just add an Adjacency-SID.
+ */
+ labels[0] = ospf_sr_get_adj_sid_by_id(
+ &inner_backup_path_info.q_node_info.node->id,
+ &q_space->q_node_info->node->id);
+ ospf_ti_lfa_append_label_stack(q_space->label_stack,
+ labels, 1);
+ } else {
+ /* Otherwise a Prefix-SID is needed */
+ labels[0] = ospf_sr_get_prefix_sid_by_id(
+ &q_space->q_node_info->node->id);
+ ospf_ti_lfa_append_label_stack(q_space->label_stack,
+ labels, 1);
+ }
+ /*
+ * Note that there's also the case where the inner and outer Q
+ * node are the same, but then there's nothing to do!
+ */
+ }
+}
+
+/*
+ * Build the post-convergence backup path towards 'dest' by walking the
+ * post-convergence SPF vertex list from the destination back to the root,
+ * always following the first parent of each vertex.
+ *
+ * Returns a newly allocated list of vertices in reverse order (destination
+ * first, root last), or NULL if 'dest' is not (yet) present in
+ * 'pc_vertex_list'. The caller owns the returned list.
+ */
+static struct list *
+ospf_ti_lfa_generate_post_convergence_path(struct list *pc_vertex_list,
+ struct vertex *dest)
+{
+ struct list *pc_path;
+ struct vertex *current_vertex;
+ struct vertex_parent *parent;
+
+ /* The destination must already be part of the post-convergence tree */
+ current_vertex = ospf_spf_vertex_find(dest->id, pc_vertex_list);
+ if (!current_vertex) {
+ if (IS_DEBUG_OSPF_TI_LFA)
+ zlog_debug(
+ "%s: There seems to be no post convergence path (yet).",
+ __func__);
+ return NULL;
+ }
+
+ pc_path = list_new();
+ listnode_add(pc_path, current_vertex);
+
+ /* Generate a backup path in reverse order */
+ for (;;) {
+ /* Only the first parent of each vertex is considered */
+ parent = listnode_head(current_vertex->parents);
+ if (!parent)
+ break;
+
+ listnode_add(pc_path, parent->parent);
+ current_vertex = parent->parent;
+ }
+
+ return pc_path;
+}
+
+/*
+ * Generate the Q space for the destination vertex 'dest' within the given
+ * P space: run a reversed SPF rooted at 'dest', cut out the protected
+ * resource, derive the post-convergence path and the backup label stack,
+ * and store the resulting Q space in the P space. If 'recursive' is set,
+ * recurse into the destination's children.
+ *
+ * Fix: the early-return path for a missing post-convergence path used to
+ * leak the freshly allocated q_space, its p_node_info/q_node_info and the
+ * reverse SPF tree; it now tears these down the same way
+ * ospf_ti_lfa_free_p_spaces() does.
+ */
+static void ospf_ti_lfa_generate_q_spaces(struct ospf_area *area,
+ struct p_space *p_space,
+ struct vertex *dest, bool recursive,
+ struct list *pc_path)
+{
+ struct listnode *node;
+ struct vertex *child;
+ struct route_table *new_table, *new_rtrs;
+ struct q_space *q_space, q_space_search;
+ char label_buf[MPLS_LABEL_STRLEN];
+ char res_buf[PROTECTED_RESOURCE_STRLEN];
+ bool node_protected;
+
+ ospf_print_protected_resource(p_space->protected_resource, res_buf);
+ node_protected =
+ p_space->protected_resource->type == OSPF_TI_LFA_NODE_PROTECTION
+ && dest->id.s_addr
+ == p_space->protected_resource->router_id.s_addr;
+
+ /*
+ * If node protection is used, don't build a Q space for the protected
+ * node of that particular P space. Move on with children instead.
+ */
+ if (node_protected) {
+ if (recursive) {
+ /* Recursively generate Q spaces for all children */
+ for (ALL_LIST_ELEMENTS_RO(dest->children, node, child))
+ ospf_ti_lfa_generate_q_spaces(area, p_space,
+ child, recursive,
+ pc_path);
+ }
+ return;
+ }
+
+ /* Check if we already have a Q space for this destination */
+ q_space_search.root = dest;
+ if (q_spaces_find(p_space->q_spaces, &q_space_search))
+ return;
+
+ q_space = XCALLOC(MTYPE_OSPF_Q_SPACE, sizeof(struct q_space));
+ q_space->p_node_info = XCALLOC(MTYPE_OSPF_Q_SPACE,
+ sizeof(struct ospf_ti_lfa_node_info));
+ q_space->q_node_info = XCALLOC(MTYPE_OSPF_Q_SPACE,
+ sizeof(struct ospf_ti_lfa_node_info));
+
+ /*
+ * NOTE(review): new_table/new_rtrs are scratch tables for the dry-run
+ * SPF below and are not freed here - confirm whether
+ * ospf_spf_calculate() takes ownership or this leaks.
+ */
+ new_table = route_table_init();
+ new_rtrs = route_table_init();
+
+ /*
+ * Generate a new (reversed!) SPF tree for this vertex,
+ * dry run true, root node false
+ */
+ area->spf_reversed = true;
+ ospf_spf_calculate(area, dest->lsa_p, new_table, new_rtrs, true, false);
+
+ /* Reset the flag for reverse SPF */
+ area->spf_reversed = false;
+
+ /* Take over the freshly computed reverse SPF tree and vertex list */
+ q_space->root = area->spf;
+ q_space->vertex_list = area->spf_vertex_list;
+ q_space->label_stack = NULL;
+
+ if (pc_path)
+ q_space->pc_path = ospf_ti_lfa_map_path_to_pc_vertices(
+ pc_path, p_space->pc_vertex_list);
+ else
+ q_space->pc_path = ospf_ti_lfa_generate_post_convergence_path(
+ p_space->pc_vertex_list, q_space->root);
+
+ /* If there's no backup path available then we are done here. */
+ if (!q_space->pc_path) {
+ zlog_info(
+ "%s: NO backup path found for root %pI4 and destination %pI4 for %s, aborting ...",
+ __func__, &p_space->root->id, &q_space->root->id,
+ res_buf);
+
+ /*
+ * Don't leak the partially built Q space; mirror the teardown
+ * done in ospf_ti_lfa_free_p_spaces(). label_stack and pc_path
+ * are still NULL at this point.
+ */
+ ospf_spf_cleanup(q_space->root, q_space->vertex_list);
+ XFREE(MTYPE_OSPF_Q_SPACE, q_space->p_node_info);
+ XFREE(MTYPE_OSPF_Q_SPACE, q_space->q_node_info);
+ XFREE(MTYPE_OSPF_Q_SPACE, q_space);
+ return;
+ }
+
+ /* 'Cut' the protected resource out of the new SPF tree */
+ ospf_spf_remove_resource(q_space->root, q_space->vertex_list,
+ p_space->protected_resource);
+
+ /*
+ * Generate the smallest possible label stack from the root of the P
+ * space to the root of the Q space.
+ */
+ ospf_ti_lfa_generate_label_stack(area, p_space, q_space);
+
+ if (q_space->label_stack) {
+ mpls_label2str(q_space->label_stack->num_labels,
+ q_space->label_stack->label, label_buf,
+ MPLS_LABEL_STRLEN, true);
+ zlog_info(
+ "%s: Generated label stack %s for root %pI4 and destination %pI4 for %s",
+ __func__, label_buf, &p_space->root->id,
+ &q_space->root->id, res_buf);
+ } else {
+ zlog_info(
+ "%s: NO label stack generated for root %pI4 and destination %pI4 for %s",
+ __func__, &p_space->root->id, &q_space->root->id,
+ res_buf);
+ }
+
+ /* We are finished, store the new Q space in the P space struct */
+ q_spaces_add(p_space->q_spaces, q_space);
+
+ /* Recursively generate Q spaces for all children */
+ if (recursive) {
+ for (ALL_LIST_ELEMENTS_RO(dest->children, node, child))
+ ospf_ti_lfa_generate_q_spaces(area, p_space, child,
+ recursive, pc_path);
+ }
+}
+
+/*
+ * Calculate the 'post convergence' SPF tree for the given P space, i.e.
+ * the SPF tree as it looks once the protected resource has failed. The
+ * resulting tree and vertex list are stored in the P space (pc_spf and
+ * pc_vertex_list) and later serve as the blueprint for backup paths.
+ */
+static void ospf_ti_lfa_generate_post_convergence_spf(struct ospf_area *area,
+ struct p_space *p_space)
+{
+ struct route_table *new_table, *new_rtrs;
+
+ /*
+ * NOTE(review): new_table/new_rtrs are scratch tables for the dry-run
+ * SPF below and are not freed here - confirm whether
+ * ospf_spf_calculate() takes ownership or this leaks.
+ */
+ new_table = route_table_init();
+ new_rtrs = route_table_init();
+
+ /* Make the SPF run skip links of the protected resource */
+ area->spf_protected_resource = p_space->protected_resource;
+
+ /*
+ * The 'post convergence' SPF tree is generated here
+ * dry run true, root node false
+ *
+ * So how does this work? During the SPF calculation the algorithm
+ * checks if a link belongs to a protected resource and then just
+ * ignores it.
+ * This is actually _NOT_ a good way to calculate the post
+ * convergence SPF tree. The preferred way would be to delete the
+ * relevant links (and nodes) from a copy of the LSDB and then just run
+ * the SPF algorithm on that as usual.
+ * However, removing links from router LSAs appears to be its own
+ * endeavour (because LSAs are stored as a 'raw' stream), so we go with
+ * this rather hacky way for now.
+ */
+ ospf_spf_calculate(area, area->router_lsa_self, new_table, new_rtrs,
+ true, false);
+
+ /* Take over the freshly computed tree and vertex list */
+ p_space->pc_spf = area->spf;
+ p_space->pc_vertex_list = area->spf_vertex_list;
+
+ area->spf_protected_resource = NULL;
+}
+
+/*
+ * Build a P space for the given protected resource: copy the current SPF
+ * tree, remove the protected resource from the copy, compute the post
+ * convergence SPF and generate the Q spaces starting at 'child'. The new
+ * P space is stored in area->p_spaces; it takes ownership of
+ * 'protected_resource'.
+ */
+static void
+ospf_ti_lfa_generate_p_space(struct ospf_area *area, struct vertex *child,
+ struct protected_resource *protected_resource,
+ bool recursive, struct list *pc_path)
+{
+ struct vertex *spf_orig;
+ struct list *vertex_list, *vertex_list_orig;
+ struct p_space *p_space;
+
+ p_space = XCALLOC(MTYPE_OSPF_P_SPACE, sizeof(struct p_space));
+ vertex_list = list_new();
+
+ /* The P-space will get its own SPF tree, so copy the old one */
+ ospf_spf_copy(area->spf, vertex_list);
+ p_space->root = listnode_head(vertex_list);
+ p_space->vertex_list = vertex_list;
+ p_space->protected_resource = protected_resource;
+
+ /* Initialize the Q spaces for this P space and protected resource */
+ p_space->q_spaces =
+ XCALLOC(MTYPE_OSPF_Q_SPACE, sizeof(struct q_spaces_head));
+ q_spaces_init(p_space->q_spaces);
+
+ /* 'Cut' the protected resource out of the new SPF tree */
+ ospf_spf_remove_resource(p_space->root, p_space->vertex_list,
+ p_space->protected_resource);
+
+ /*
+ * Since we are going to calculate more SPF trees for Q spaces, keep the
+ * 'original' one here temporarily
+ */
+ spf_orig = area->spf;
+ vertex_list_orig = area->spf_vertex_list;
+
+ /* Generate the post convergence SPF as a blueprint for backup paths */
+ ospf_ti_lfa_generate_post_convergence_spf(area, p_space);
+
+ /* Generate the relevant Q spaces for this particular P space */
+ ospf_ti_lfa_generate_q_spaces(area, p_space, child, recursive, pc_path);
+
+ /* Put the 'original' SPF tree back in place */
+ area->spf = spf_orig;
+ area->spf_vertex_list = vertex_list_orig;
+
+ /* We are finished, store the new P space */
+ p_spaces_add(area->p_spaces, p_space);
+}
+
+/*
+ * Generate all P spaces (and nested Q spaces) for the given area by
+ * iterating over the links of the root's own router LSA. For node
+ * protection a P space is built per point-to-point neighbor; for link
+ * protection a P space is built per stub network that matches a child's
+ * nexthop.
+ */
+void ospf_ti_lfa_generate_p_spaces(struct ospf_area *area,
+ enum protection_type protection_type)
+{
+ struct listnode *node, *inner_node;
+ struct vertex *root, *child;
+ struct vertex_parent *vertex_parent;
+ uint8_t *p, *lim;
+ struct router_lsa_link *l = NULL;
+ struct prefix stub_prefix, child_prefix;
+ struct protected_resource *protected_resource;
+
+ area->p_spaces =
+ XCALLOC(MTYPE_OSPF_P_SPACE, sizeof(struct p_spaces_head));
+ p_spaces_init(area->p_spaces);
+
+ root = area->spf;
+
+ /* Root or its router LSA was not created yet? */
+ if (!root || !root->lsa)
+ return;
+
+ stub_prefix.family = AF_INET;
+ child_prefix.family = AF_INET;
+ child_prefix.prefixlen = IPV4_MAX_PREFIXLEN;
+
+ /* Walk the link records of the root's router LSA (skip the header) */
+ p = ((uint8_t *)root->lsa) + OSPF_LSA_HEADER_SIZE + 4;
+ lim = ((uint8_t *)root->lsa) + ntohs(root->lsa->length);
+
+ zlog_info("%s: Generating P spaces for area %pI4", __func__,
+ &area->area_id);
+
+ /*
+ * Iterate over all stub networks which target other OSPF neighbors.
+ * Check the nexthop of the child vertex if a stub network is relevant.
+ */
+ while (p < lim) {
+ l = (struct router_lsa_link *)p;
+ p += (OSPF_ROUTER_LSA_LINK_SIZE
+ + (l->m[0].tos_count * OSPF_ROUTER_LSA_TOS_SIZE));
+
+ /* First comes node protection */
+ if (protection_type == OSPF_TI_LFA_NODE_PROTECTION) {
+ if (l->m[0].type == LSA_LINK_TYPE_POINTOPOINT) {
+ protected_resource = XCALLOC(
+ MTYPE_OSPF_P_SPACE,
+ sizeof(struct protected_resource));
+ protected_resource->type = protection_type;
+ protected_resource->router_id = l->link_id;
+ child = ospf_spf_vertex_find(
+ protected_resource->router_id,
+ root->children);
+ /*
+ * NOTE(review): if no child vertex is found the
+ * allocated protected_resource is not freed -
+ * confirm whether this can leak.
+ */
+ if (child)
+ ospf_ti_lfa_generate_p_space(
+ area, child, protected_resource,
+ true, NULL);
+ }
+
+ continue;
+ }
+
+ /* The rest is about link protection */
+ if (protection_type != OSPF_TI_LFA_LINK_PROTECTION)
+ continue;
+
+ if (l->m[0].type != LSA_LINK_TYPE_STUB)
+ continue;
+
+ /* For stub links link_data is the network mask */
+ stub_prefix.prefixlen = ip_masklen(l->link_data);
+ stub_prefix.u.prefix4 = l->link_id;
+
+ for (ALL_LIST_ELEMENTS_RO(root->children, node, child)) {
+
+ if (child->type != OSPF_VERTEX_ROUTER)
+ continue;
+
+ for (ALL_LIST_ELEMENTS_RO(child->parents, inner_node,
+ vertex_parent)) {
+
+ child_prefix.u.prefix4 =
+ vertex_parent->nexthop->router;
+
+ /*
+ * If there's a link for that stub network then
+ * we will protect it. Hence generate a P space
+ * for that particular link including the
+ * Q spaces so we can later on generate a
+ * backup path for the link.
+ */
+ if (prefix_match(&stub_prefix, &child_prefix)) {
+ zlog_info(
+ "%s: Generating P space for %pI4",
+ __func__, &l->link_id);
+
+ protected_resource = XCALLOC(
+ MTYPE_OSPF_P_SPACE,
+ sizeof(struct
+ protected_resource));
+ protected_resource->type =
+ protection_type;
+ protected_resource->link = l;
+
+ ospf_ti_lfa_generate_p_space(
+ area, child, protected_resource,
+ true, NULL);
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Find the P space that protects the resource used by the given path:
+ * for link protection, match the path's nexthop against the protected
+ * link's subnet (nexthop & mask == link_id & mask); for node protection,
+ * match the protected router id against the child vertex reached via the
+ * path's nexthop. Returns NULL if no P space applies.
+ */
+static struct p_space *ospf_ti_lfa_get_p_space_by_path(struct ospf_area *area,
+ struct ospf_path *path)
+{
+ struct p_space *p_space;
+ struct router_lsa_link *link;
+ struct vertex *child;
+ int type;
+
+ frr_each(p_spaces, area->p_spaces, p_space) {
+ type = p_space->protected_resource->type;
+
+ if (type == OSPF_TI_LFA_LINK_PROTECTION) {
+ link = p_space->protected_resource->link;
+ /* Is the nexthop inside the protected link's subnet? */
+ if ((path->nexthop.s_addr & link->link_data.s_addr)
+ == (link->link_id.s_addr & link->link_data.s_addr))
+ return p_space;
+ }
+
+ if (type == OSPF_TI_LFA_NODE_PROTECTION) {
+ child = ospf_spf_vertex_by_nexthop(area->spf,
+ &path->nexthop);
+ if (child
+ && p_space->protected_resource->router_id.s_addr
+ == child->id.s_addr)
+ return p_space;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Attach TI-LFA backup information (label stack and backup nexthop) to
+ * every eligible OSPF path in 'new_table'. For each path the matching
+ * P space is looked up via the path's nexthop and the Q space via the
+ * path's advertising router; the Q space's label stack is then copied
+ * into the path's segment routing nexthop info (srni).
+ */
+void ospf_ti_lfa_insert_backup_paths(struct ospf_area *area,
+ struct route_table *new_table)
+{
+ struct route_node *rn;
+ struct ospf_route *or;
+ struct ospf_path *path;
+ struct listnode *node;
+ struct p_space *p_space;
+ struct q_space *q_space, q_space_search;
+ struct vertex root_search;
+ char label_buf[MPLS_LABEL_STRLEN];
+
+ for (rn = route_top(new_table); rn; rn = route_next(rn)) {
+ or = rn->info;
+ if (or == NULL)
+ continue;
+
+ /* Insert a backup path for all OSPF paths */
+ for (ALL_LIST_ELEMENTS_RO(or->paths, node, path)) {
+
+ /* Skip paths without a usable router id or nexthop */
+ if (path->adv_router.s_addr == INADDR_ANY
+ || path->nexthop.s_addr == INADDR_ANY)
+ continue;
+
+ if (IS_DEBUG_OSPF_TI_LFA)
+ zlog_debug(
+ "%s: attempting to insert backup path for prefix %pFX, router id %pI4 and nexthop %pI4.",
+ __func__, &rn->p, &path->adv_router,
+ &path->nexthop);
+
+ p_space = ospf_ti_lfa_get_p_space_by_path(area, path);
+ if (!p_space) {
+ if (IS_DEBUG_OSPF_TI_LFA)
+ zlog_debug(
+ "%s: P space not found for router id %pI4 and nexthop %pI4.",
+ __func__, &path->adv_router,
+ &path->nexthop);
+ continue;
+ }
+
+ /* Q spaces are keyed by their root vertex id */
+ root_search.id = path->adv_router;
+ q_space_search.root = &root_search;
+ q_space = q_spaces_find(p_space->q_spaces,
+ &q_space_search);
+ if (!q_space) {
+ if (IS_DEBUG_OSPF_TI_LFA)
+ zlog_debug(
+ "%s: Q space not found for advertising router %pI4.",
+ __func__, &path->adv_router);
+ continue;
+ }
+
+ /* If there's a backup label stack, insert it*/
+ if (q_space->label_stack) {
+ /* Init the backup path data in path */
+ path->srni.backup_label_stack = XCALLOC(
+ MTYPE_OSPF_PATH,
+ sizeof(struct mpls_label_stack)
+ + sizeof(mpls_label_t)
+ * q_space->label_stack
+ ->num_labels);
+
+ /* Copy over the label stack */
+ path->srni.backup_label_stack->num_labels =
+ q_space->label_stack->num_labels;
+ memcpy(path->srni.backup_label_stack->label,
+ q_space->label_stack->label,
+ sizeof(mpls_label_t)
+ * q_space->label_stack
+ ->num_labels);
+
+ /* Set the backup nexthop too */
+ path->srni.backup_nexthop = q_space->nexthop;
+ }
+
+ if (path->srni.backup_label_stack) {
+ mpls_label2str(
+ path->srni.backup_label_stack
+ ->num_labels,
+ path->srni.backup_label_stack->label,
+ label_buf, MPLS_LABEL_STRLEN, true);
+ if (IS_DEBUG_OSPF_TI_LFA)
+ zlog_debug(
+ "%s: inserted backup path %s for prefix %pFX, router id %pI4 and nexthop %pI4.",
+ __func__, label_buf, &rn->p,
+ &path->adv_router,
+ &path->nexthop);
+ } else {
+ if (IS_DEBUG_OSPF_TI_LFA)
+ zlog_debug(
+ "%s: inserted NO backup path for prefix %pFX, router id %pI4 and nexthop %pI4.",
+ __func__, &rn->p,
+ &path->adv_router,
+ &path->nexthop);
+ }
+ }
+ }
+}
+
+/*
+ * Tear down all P spaces of the area, including their nested Q spaces,
+ * SPF trees, post-convergence trees and the protected resources they own.
+ * Frees area->p_spaces itself as well.
+ */
+void ospf_ti_lfa_free_p_spaces(struct ospf_area *area)
+{
+ struct p_space *p_space;
+ struct q_space *q_space;
+
+ while ((p_space = p_spaces_pop(area->p_spaces))) {
+ while ((q_space = q_spaces_pop(p_space->q_spaces))) {
+ /* Release the reverse SPF tree owned by the Q space */
+ ospf_spf_cleanup(q_space->root, q_space->vertex_list);
+
+ if (q_space->pc_path)
+ list_delete(&q_space->pc_path);
+
+ XFREE(MTYPE_OSPF_Q_SPACE, q_space->p_node_info);
+ XFREE(MTYPE_OSPF_Q_SPACE, q_space->q_node_info);
+ XFREE(MTYPE_OSPF_Q_SPACE, q_space->label_stack);
+ XFREE(MTYPE_OSPF_Q_SPACE, q_space);
+ }
+
+ /* Free both the 'cut' SPF copy and the post-convergence tree */
+ ospf_spf_cleanup(p_space->root, p_space->vertex_list);
+ ospf_spf_cleanup(p_space->pc_spf, p_space->pc_vertex_list);
+ XFREE(MTYPE_OSPF_P_SPACE, p_space->protected_resource);
+
+ q_spaces_fini(p_space->q_spaces);
+ XFREE(MTYPE_OSPF_Q_SPACE, p_space->q_spaces);
+ }
+
+ p_spaces_fini(area->p_spaces);
+ XFREE(MTYPE_OSPF_P_SPACE, area->p_spaces);
+}
+
+/*
+ * Top-level TI-LFA entry point for an area: build the P/Q spaces for the
+ * requested protection type, insert the resulting backup paths into
+ * 'new_table', and free all intermediate data structures afterwards.
+ */
+void ospf_ti_lfa_compute(struct ospf_area *area, struct route_table *new_table,
+ enum protection_type protection_type)
+{
+ /*
+ * Generate P spaces per protected link/node and their respective Q
+ * spaces, generate backup paths (MPLS label stacks) by finding P/Q
+ * nodes.
+ */
+ ospf_ti_lfa_generate_p_spaces(area, protection_type);
+
+ /* Insert the generated backup paths into the routing table. */
+ ospf_ti_lfa_insert_backup_paths(area, new_table);
+
+ /* Cleanup P spaces and related datastructures including Q spaces. */
+ ospf_ti_lfa_free_p_spaces(area);
+}
diff --git a/ospfd/ospf_ti_lfa.h b/ospfd/ospf_ti_lfa.h
new file mode 100644
index 0000000000..bc8f19b98f
--- /dev/null
+++ b/ospfd/ospf_ti_lfa.h
@@ -0,0 +1,41 @@
+/*
+ * OSPF calculation.
+ * Copyright (C) 2020 NetDEF, Inc.
+ * Sascha Kattelmann
+ *
+ * This file is part of FRR.
+ *
+ * FRR is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * FRR is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _OSPF_TI_LFA_H
+#define _OSPF_TI_LFA_H
+
+#define PROTECTED_RESOURCE_STRLEN 100
+
+extern void ospf_ti_lfa_compute(struct ospf_area *area,
+ struct route_table *new_table,
+ enum protection_type protection_type);
+
+/* unit testing */
+extern void ospf_ti_lfa_generate_p_spaces(struct ospf_area *area,
+ enum protection_type protection_type);
+extern void ospf_ti_lfa_insert_backup_paths(struct ospf_area *area,
+ struct route_table *new_table);
+extern void ospf_ti_lfa_free_p_spaces(struct ospf_area *area);
+void ospf_print_protected_resource(
+ struct protected_resource *protected_resource, char *buf);
+
+#endif /* _OSPF_TI_LFA_H */
diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c
index 68ad62cda4..4132452069 100644
--- a/ospfd/ospf_vty.c
+++ b/ospfd/ospf_vty.c
@@ -2602,6 +2602,43 @@ ALIAS(no_ospf_write_multiplier, no_write_multiplier_cmd,
"Write multiplier\n"
"Maximum number of interface serviced per write\n")
+/* Enable TI-LFA fast reroute; optional 'node-protection' keyword selects
+ * node protection instead of the default link protection. Triggers an SPF
+ * recalculation so backup paths are computed immediately. */
+DEFUN(ospf_ti_lfa, ospf_ti_lfa_cmd, "fast-reroute ti-lfa [node-protection]",
+ "Fast Reroute for MPLS and IP resilience\n"
+ "Topology Independent LFA (Loop-Free Alternate)\n"
+ "TI-LFA node protection (default is link protection)\n")
+{
+ VTY_DECLVAR_INSTANCE_CONTEXT(ospf, ospf);
+
+ ospf->ti_lfa_enabled = true;
+
+ /* argc == 3 iff the optional 'node-protection' token was given */
+ if (argc == 3)
+ ospf->ti_lfa_protection_type = OSPF_TI_LFA_NODE_PROTECTION;
+ else
+ ospf->ti_lfa_protection_type = OSPF_TI_LFA_LINK_PROTECTION;
+
+ ospf_spf_calculate_schedule(ospf, SPF_FLAG_CONFIG_CHANGE);
+
+ return CMD_SUCCESS;
+}
+
+/* Disable TI-LFA fast reroute and reschedule SPF so backup paths are
+ * withdrawn. */
+DEFUN(no_ospf_ti_lfa, no_ospf_ti_lfa_cmd,
+ "no fast-reroute ti-lfa [node-protection]",
+ NO_STR
+ "Fast Reroute for MPLS and IP resilience\n"
+ "Topology Independent LFA (Loop-Free Alternate)\n"
+ "TI-LFA node protection (default is link protection)\n")
+{
+ VTY_DECLVAR_INSTANCE_CONTEXT(ospf, ospf);
+
+ ospf->ti_lfa_enabled = false;
+
+ ospf->ti_lfa_protection_type = OSPF_TI_LFA_UNDEFINED_PROTECTION;
+
+ ospf_spf_calculate_schedule(ospf, SPF_FLAG_CONFIG_CHANGE);
+
+ return CMD_SUCCESS;
+}
+
static const char *const ospf_abr_type_descr_str[] = {
"Unknown", "Standard (RFC2328)", "Alternative IBM",
"Alternative Cisco", "Alternative Shortcut"
@@ -6369,31 +6406,7 @@ static int show_as_external_lsa_detail(struct vty *vty, struct ospf_lsa *lsa,
return 0;
}
-#if 0
-static int
-show_as_external_lsa_stdvty (struct ospf_lsa *lsa)
-{
- struct as_external_lsa *al = (struct as_external_lsa *) lsa->data;
-
- /* show_ip_ospf_database_header (vty, lsa); */
-
- zlog_debug( " Network Mask: /%d%s",
- ip_masklen (al->mask), "\n");
- zlog_debug( " Metric Type: %s%s",
- IS_EXTERNAL_METRIC (al->e[0].tos) ?
- "2 (Larger than any link state path)" : "1", "\n");
- zlog_debug( " TOS: 0%s", "\n");
- zlog_debug( " Metric: %d%s",
- GET_METRIC (al->e[0].metric), "\n");
- zlog_debug( " Forward Address: %pI4%s",
- &al->e[0].fwd_addr, "\n");
- zlog_debug( " External Route Tag: %"ROUTE_TAG_PRI"%s%s",
- (route_tag_t)ntohl (al->e[0].route_tag), "\n", "\n");
-
- return 0;
-}
-#endif
/* Show AS-NSSA-LSA detail information. */
static int show_as_nssa_lsa_detail(struct vty *vty, struct ospf_lsa *lsa,
json_object *json)
@@ -6681,8 +6694,8 @@ static void show_lsa_detail_adv_router(struct vty *vty, struct ospf *ospf,
json_lstype);
}
-static void show_ip_ospf_database_summary(struct vty *vty, struct ospf *ospf,
- int self, json_object *json)
+void show_ip_ospf_database_summary(struct vty *vty, struct ospf *ospf, int self,
+ json_object *json)
{
struct ospf_lsa *lsa;
struct route_node *rn;
@@ -8921,6 +8934,7 @@ DEFUN (no_ip_ospf_area,
struct ospf_if_params *params;
unsigned short instance = 0;
struct in_addr addr;
+ struct in_addr area_id;
if (argv_find(argv, argc, "(1-65535)", &idx))
instance = strtol(argv[idx]->arg, NULL, 10);
@@ -8948,6 +8962,7 @@ DEFUN (no_ip_ospf_area,
} else
params = IF_DEF_PARAMS(ifp);
+ area_id = params->if_area;
if (!OSPF_IF_PARAM_CONFIGURED(params, if_area)) {
vty_out(vty,
"Can't find specified interface area configuration.\n");
@@ -8963,6 +8978,7 @@ DEFUN (no_ip_ospf_area,
if (ospf) {
ospf_interface_area_unset(ospf, ifp);
ospf->if_ospf_cli_count--;
+ ospf_area_check_free(ospf, area_id);
}
return CMD_SUCCESS;
@@ -9427,78 +9443,6 @@ DEFUN (ospf_distance_ospf,
return CMD_SUCCESS;
}
-#if 0
-DEFUN (ospf_distance_source,
- ospf_distance_source_cmd,
- "distance (1-255) A.B.C.D/M",
- "Administrative distance\n"
- "Distance value\n"
- "IP source prefix\n")
-{
- VTY_DECLVAR_CONTEXT(ospf, ospf);
- int idx_number = 1;
- int idx_ipv4_prefixlen = 2;
-
- ospf_distance_set (vty, ospf, argv[idx_number]->arg, argv[idx_ipv4_prefixlen]->arg, NULL);
-
- return CMD_SUCCESS;
-}
-
-DEFUN (no_ospf_distance_source,
- no_ospf_distance_source_cmd,
- "no distance (1-255) A.B.C.D/M",
- NO_STR
- "Administrative distance\n"
- "Distance value\n"
- "IP source prefix\n")
-{
- VTY_DECLVAR_CONTEXT(ospf, ospf);
- int idx_number = 2;
- int idx_ipv4_prefixlen = 3;
-
- ospf_distance_unset (vty, ospf, argv[idx_number]->arg, argv[idx_ipv4_prefixlen]->arg, NULL);
-
- return CMD_SUCCESS;
-}
-
-DEFUN (ospf_distance_source_access_list,
- ospf_distance_source_access_list_cmd,
- "distance (1-255) A.B.C.D/M WORD",
- "Administrative distance\n"
- "Distance value\n"
- "IP source prefix\n"
- "Access list name\n")
-{
- VTY_DECLVAR_CONTEXT(ospf, ospf);
- int idx_number = 1;
- int idx_ipv4_prefixlen = 2;
- int idx_word = 3;
-
- ospf_distance_set (vty, ospf, argv[idx_number]->arg, argv[idx_ipv4_prefixlen]->arg, argv[idx_word]->arg);
-
- return CMD_SUCCESS;
-}
-
-DEFUN (no_ospf_distance_source_access_list,
- no_ospf_distance_source_access_list_cmd,
- "no distance (1-255) A.B.C.D/M WORD",
- NO_STR
- "Administrative distance\n"
- "Distance value\n"
- "IP source prefix\n"
- "Access list name\n")
-{
- VTY_DECLVAR_CONTEXT(ospf, ospf);
- int idx_number = 2;
- int idx_ipv4_prefixlen = 3;
- int idx_word = 4;
-
- ospf_distance_unset (vty, ospf, argv[idx_number]->arg, argv[idx_ipv4_prefixlen]->arg, argv[idx_word]->arg);
-
- return CMD_SUCCESS;
-}
-#endif
-
DEFUN (ip_ospf_mtu_ignore,
ip_ospf_mtu_ignore_addr_cmd,
"ip ospf mtu-ignore [A.B.C.D]",
@@ -9978,10 +9922,10 @@ DEFPY(no_ospf_gr_helper_planned_only,
return CMD_SUCCESS;
}
-static int ospf_print_vty_helper_dis_rtr_walkcb(struct hash_bucket *backet,
+static int ospf_print_vty_helper_dis_rtr_walkcb(struct hash_bucket *bucket,
void *arg)
{
- struct advRtr *rtr = backet->data;
+ struct advRtr *rtr = bucket->data;
struct vty *vty = (struct vty *)arg;
static unsigned int count;
@@ -11299,10 +11243,10 @@ static const char *const ospf_abr_type_str[] = {
static const char *const ospf_shortcut_mode_str[] = {
"default", "enable", "disable"
};
-static int ospf_vty_external_rt_walkcb(struct hash_bucket *backet,
+static int ospf_vty_external_rt_walkcb(struct hash_bucket *bucket,
void *arg)
{
- struct external_info *ei = backet->data;
+ struct external_info *ei = bucket->data;
struct vty *vty = (struct vty *)arg;
static unsigned int count;
@@ -11318,10 +11262,10 @@ static int ospf_vty_external_rt_walkcb(struct hash_bucket *backet,
return HASHWALK_CONTINUE;
}
-static int ospf_json_external_rt_walkcb(struct hash_bucket *backet,
+static int ospf_json_external_rt_walkcb(struct hash_bucket *bucket,
void *arg)
{
- struct external_info *ei = backet->data;
+ struct external_info *ei = bucket->data;
struct json_object *json = (struct json_object *)arg;
char buf[PREFIX2STR_BUFFER];
char exnalbuf[20];
@@ -12090,10 +12034,10 @@ static int config_write_ospf_redistribute(struct vty *vty, struct ospf *ospf)
return 0;
}
-static int ospf_cfg_write_helper_dis_rtr_walkcb(struct hash_bucket *backet,
+static int ospf_cfg_write_helper_dis_rtr_walkcb(struct hash_bucket *bucket,
void *arg)
{
- struct advRtr *rtr = backet->data;
+ struct advRtr *rtr = bucket->data;
struct vty *vty = (struct vty *)arg;
vty_out(vty, " graceful-restart helper-only %pI4\n",
@@ -12361,6 +12305,14 @@ static int ospf_config_write_one(struct vty *vty, struct ospf *ospf)
oi->ifp->name, &oi->address->u.prefix4);
}
+ /* TI-LFA print. */
+ if (ospf->ti_lfa_enabled) {
+ if (ospf->ti_lfa_protection_type == OSPF_TI_LFA_NODE_PROTECTION)
+ vty_out(vty, " fast-reroute ti-lfa node-protection\n");
+ else
+ vty_out(vty, " fast-reroute ti-lfa\n");
+ }
+
/* Network area print. */
config_write_network_area(vty, ospf);
@@ -12605,13 +12557,6 @@ static void ospf_vty_zebra_init(void)
&no_ospf_external_route_aggregation_no_adrvertise_cmd);
install_element(OSPF_NODE, &ospf_route_aggregation_timer_cmd);
install_element(OSPF_NODE, &no_ospf_route_aggregation_timer_cmd);
-
-#if 0
- install_element (OSPF_NODE, &ospf_distance_source_cmd);
- install_element (OSPF_NODE, &no_ospf_distance_source_cmd);
- install_element (OSPF_NODE, &ospf_distance_source_access_list_cmd);
- install_element (OSPF_NODE, &no_ospf_distance_source_access_list_cmd);
-#endif /* 0 */
}
static int ospf_config_write(struct vty *vty);
@@ -12825,6 +12770,10 @@ void ospf_vty_init(void)
install_element(OSPF_NODE, &ospf_proactive_arp_cmd);
install_element(OSPF_NODE, &no_ospf_proactive_arp_cmd);
+ /* TI-LFA commands */
+ install_element(OSPF_NODE, &ospf_ti_lfa_cmd);
+ install_element(OSPF_NODE, &no_ospf_ti_lfa_cmd);
+
/* Init interface related vty commands. */
ospf_vty_if_init();
diff --git a/ospfd/ospf_vty.h b/ospfd/ospf_vty.h
index 79aabe7b4e..bf9c971710 100644
--- a/ospfd/ospf_vty.h
+++ b/ospfd/ospf_vty.h
@@ -54,4 +54,8 @@ extern void ospf_vty_show_init(void);
extern void ospf_vty_clear_init(void);
extern int str2area_id(const char *, struct in_addr *, int *);
+/* unit tests */
+void show_ip_ospf_database_summary(struct vty *vty, struct ospf *ospf, int self,
+ json_object *json);
+
#endif /* _QUAGGA_OSPF_VTY_H */
diff --git a/ospfd/ospf_zebra.c b/ospfd/ospf_zebra.c
index 2d02619ae3..aaab274570 100644
--- a/ospfd/ospf_zebra.c
+++ b/ospfd/ospf_zebra.c
@@ -198,15 +198,70 @@ static int ospf_interface_vrf_update(ZAPI_CALLBACK_ARGS)
return 0;
}
+/* Nexthop, ifindex, distance and metric information. */
+/*
+ * Append one primary nexthop (and, if the path carries a TI-LFA backup
+ * label stack, one backup nexthop) for 'path' to the zapi_route message.
+ * Increments api->nexthop_num and possibly api->backup_nexthop_num.
+ *
+ * NOTE(review): the caller bounds api->nexthop_num via MULTIPATH_NUM, but
+ * no bound is checked here for api->backup_nexthop_num - confirm the
+ * zapi_route backup array cannot overflow.
+ */
+static void ospf_zebra_add_nexthop(struct ospf *ospf, struct ospf_path *path,
+ struct zapi_route *api)
+{
+ struct zapi_nexthop *api_nh;
+ struct zapi_nexthop *api_nh_backup;
+
+ /* TI-LFA backup path label stack comes first, if present */
+ if (path->srni.backup_label_stack) {
+ api_nh_backup = &api->backup_nexthops[api->backup_nexthop_num];
+ api_nh_backup->vrf_id = ospf->vrf_id;
+
+ api_nh_backup->type = NEXTHOP_TYPE_IPV4;
+ api_nh_backup->gate.ipv4 = path->srni.backup_nexthop;
+
+ api_nh_backup->label_num =
+ path->srni.backup_label_stack->num_labels;
+ memcpy(api_nh_backup->labels,
+ path->srni.backup_label_stack->label,
+ sizeof(mpls_label_t) * api_nh_backup->label_num);
+
+ api->backup_nexthop_num++;
+ }
+
+ /* And here comes the primary nexthop */
+ api_nh = &api->nexthops[api->nexthop_num];
+#ifdef HAVE_NETLINK
+ if (path->unnumbered
+ || (path->nexthop.s_addr != INADDR_ANY && path->ifindex != 0)) {
+#else /* HAVE_NETLINK */
+ if (path->nexthop.s_addr != INADDR_ANY && path->ifindex != 0) {
+#endif /* HAVE_NETLINK */
+ /* Both gateway and interface are known */
+ api_nh->gate.ipv4 = path->nexthop;
+ api_nh->ifindex = path->ifindex;
+ api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
+ } else if (path->nexthop.s_addr != INADDR_ANY) {
+ /* Gateway only */
+ api_nh->gate.ipv4 = path->nexthop;
+ api_nh->type = NEXTHOP_TYPE_IPV4;
+ } else {
+ /* Interface only (e.g. connected routes) */
+ api_nh->ifindex = path->ifindex;
+ api_nh->type = NEXTHOP_TYPE_IFINDEX;
+ }
+ api_nh->vrf_id = ospf->vrf_id;
+
+ /* Set TI-LFA backup nexthop info if present */
+ if (path->srni.backup_label_stack) {
+ SET_FLAG(api->message, ZAPI_MESSAGE_BACKUP_NEXTHOPS);
+ SET_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP);
+
+ /* Just care about a single TI-LFA backup path for now */
+ api_nh->backup_num = 1;
+ api_nh->backup_idx[0] = api->backup_nexthop_num - 1;
+ }
+
+ api->nexthop_num++;
+}
+
void ospf_zebra_add(struct ospf *ospf, struct prefix_ipv4 *p,
struct ospf_route * or)
{
struct zapi_route api;
- struct zapi_nexthop *api_nh;
uint8_t distance;
struct ospf_path *path;
struct listnode *node;
- int count = 0;
memset(&api, 0, sizeof(api));
api.vrf_id = ospf->vrf_id;
@@ -241,29 +296,11 @@ void ospf_zebra_add(struct ospf *ospf, struct prefix_ipv4 *p,
api.distance = distance;
}
- /* Nexthop, ifindex, distance and metric information. */
for (ALL_LIST_ELEMENTS_RO(or->paths, node, path)) {
- if (count >= MULTIPATH_NUM)
+ if (api.nexthop_num >= MULTIPATH_NUM)
break;
- api_nh = &api.nexthops[count];
-#ifdef HAVE_NETLINK
- if (path->unnumbered || (path->nexthop.s_addr != INADDR_ANY
- && path->ifindex != 0)) {
-#else /* HAVE_NETLINK */
- if (path->nexthop.s_addr != INADDR_ANY && path->ifindex != 0) {
-#endif /* HAVE_NETLINK */
- api_nh->gate.ipv4 = path->nexthop;
- api_nh->ifindex = path->ifindex;
- api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
- } else if (path->nexthop.s_addr != INADDR_ANY) {
- api_nh->gate.ipv4 = path->nexthop;
- api_nh->type = NEXTHOP_TYPE_IPV4;
- } else {
- api_nh->ifindex = path->ifindex;
- api_nh->type = NEXTHOP_TYPE_IFINDEX;
- }
- api_nh->vrf_id = ospf->vrf_id;
- count++;
+
+ ospf_zebra_add_nexthop(ospf, path, &api);
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) {
struct interface *ifp;
@@ -276,7 +313,6 @@ void ospf_zebra_add(struct ospf *ospf, struct prefix_ipv4 *p,
ifp ? ifp->name : " ");
}
}
- api.nexthop_num = count;
zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);
}
@@ -501,12 +537,10 @@ void ospf_zebra_update_prefix_sid(const struct sr_prefix *srp)
{
struct zapi_labels zl;
struct zapi_nexthop *znh;
+ struct zapi_nexthop *znh_backup;
struct listnode *node;
struct ospf_path *path;
- osr_debug("SR (%s): Update Labels %u for Prefix %pFX", __func__,
- srp->label_in, (struct prefix *)&srp->prefv4);
-
/* Prepare message. */
memset(&zl, 0, sizeof(zl));
zl.type = ZEBRA_LSP_OSPF_SR;
@@ -520,6 +554,11 @@ void ospf_zebra_update_prefix_sid(const struct sr_prefix *srp)
znh->ifindex = srp->nhlfe.ifindex;
znh->label_num = 1;
znh->labels[0] = srp->nhlfe.label_out;
+
+ osr_debug("SR (%s): Configure Prefix %pFX with labels %u/%u",
+ __func__, (struct prefix *)&srp->prefv4,
+ srp->label_in, srp->nhlfe.label_out);
+
break;
case PREF_SID:
@@ -535,6 +574,10 @@ void ospf_zebra_update_prefix_sid(const struct sr_prefix *srp)
if (srp->route == NULL) {
return;
}
+
+ osr_debug("SR (%s): Configure Prefix %pFX with",
+ __func__, (struct prefix *)&srp->prefv4);
+
for (ALL_LIST_ELEMENTS_RO(srp->route->paths, node, path)) {
if (path->srni.label_out == MPLS_INVALID_LABEL)
continue;
@@ -542,12 +585,56 @@ void ospf_zebra_update_prefix_sid(const struct sr_prefix *srp)
if (zl.nexthop_num >= MULTIPATH_NUM)
break;
+ /*
+ * TI-LFA backup path label stack comes first, if
+ * present.
+ */
+ if (path->srni.backup_label_stack) {
+ znh_backup = &zl.backup_nexthops
+ [zl.backup_nexthop_num++];
+ znh_backup->type = NEXTHOP_TYPE_IPV4;
+ znh_backup->gate.ipv4 =
+ path->srni.backup_nexthop;
+
+ memcpy(znh_backup->labels,
+ path->srni.backup_label_stack->label,
+ sizeof(mpls_label_t)
+ * path->srni.backup_label_stack
+ ->num_labels);
+
+ znh_backup->label_num =
+ path->srni.backup_label_stack
+ ->num_labels;
+ if (path->srni.label_out
+ != MPLS_LABEL_IPV4_EXPLICIT_NULL
+ && path->srni.label_out
+ != MPLS_LABEL_IMPLICIT_NULL)
+ znh_backup->labels
+ [znh_backup->label_num++] =
+ path->srni.label_out;
+ }
+
znh = &zl.nexthops[zl.nexthop_num++];
znh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
znh->gate.ipv4 = path->nexthop;
znh->ifindex = path->ifindex;
znh->label_num = 1;
znh->labels[0] = path->srni.label_out;
+
+ osr_debug(" |- labels %u/%u", srp->label_in,
+ srp->nhlfe.label_out);
+
+ /* Set TI-LFA backup nexthop info if present */
+ if (path->srni.backup_label_stack) {
+ SET_FLAG(zl.message, ZAPI_LABELS_HAS_BACKUPS);
+ SET_FLAG(znh->flags,
+ ZAPI_NEXTHOP_FLAG_HAS_BACKUP);
+
+ /* Just care about a single TI-LFA backup path
+ * for now */
+ znh->backup_num = 1;
+ znh->backup_idx[0] = zl.backup_nexthop_num - 1;
+ }
}
break;
default:
diff --git a/ospfd/ospfd.c b/ospfd/ospfd.c
index bab75995b7..04397d50a5 100644
--- a/ospfd/ospfd.c
+++ b/ospfd/ospfd.c
@@ -87,6 +87,30 @@ static void ospf_finish_final(struct ospf *);
#define OSPF_EXTERNAL_LSA_ORIGINATE_DELAY 1
+int p_spaces_compare_func(const struct p_space *a, const struct p_space *b)
+{
+ if (a->protected_resource->type == OSPF_TI_LFA_LINK_PROTECTION
+ && b->protected_resource->type == OSPF_TI_LFA_LINK_PROTECTION)
+ return (a->protected_resource->link->link_id.s_addr
+ - b->protected_resource->link->link_id.s_addr);
+
+ if (a->protected_resource->type == OSPF_TI_LFA_NODE_PROTECTION
+ && b->protected_resource->type == OSPF_TI_LFA_NODE_PROTECTION)
+ return (a->protected_resource->router_id.s_addr
+ - b->protected_resource->router_id.s_addr);
+
+ /* This should not happen */
+ return 0;
+}
+
+int q_spaces_compare_func(const struct q_space *a, const struct q_space *b)
+{
+ return (a->root->id.s_addr - b->root->id.s_addr);
+}
+
+DECLARE_RBTREE_UNIQ(p_spaces, struct p_space, p_spaces_item,
+ p_spaces_compare_func)
+
void ospf_process_refresh_data(struct ospf *ospf, bool reset)
{
struct vrf *vrf = vrf_lookup_by_id(ospf->vrf_id);
@@ -264,8 +288,7 @@ static int ospf_area_id_cmp(struct ospf_area *a1, struct ospf_area *a2)
return 0;
}
-/* Allocate new ospf structure. */
-static struct ospf *ospf_new(unsigned short instance, const char *name)
+struct ospf *ospf_new_alloc(unsigned short instance, const char *name)
{
int i;
struct vrf *vrf = NULL;
@@ -340,8 +363,6 @@ static struct ospf *ospf_new(unsigned short instance, const char *name)
new->maxage_delay = OSPF_LSA_MAXAGE_REMOVE_DELAY_DEFAULT;
new->maxage_lsa = route_table_init();
new->t_maxage_walker = NULL;
- thread_add_timer(master, ospf_lsa_maxage_walker, new,
- OSPF_LSA_MAXAGE_CHECK_INTERVAL, &new->t_maxage_walker);
/* Distance table init. */
new->distance_table = route_table_init();
@@ -349,8 +370,6 @@ static struct ospf *ospf_new(unsigned short instance, const char *name)
new->lsa_refresh_queue.index = 0;
new->lsa_refresh_interval = OSPF_LSA_REFRESH_INTERVAL_DEFAULT;
new->t_lsa_refresher = NULL;
- thread_add_timer(master, ospf_lsa_refresh_walker, new,
- new->lsa_refresh_interval, &new->t_lsa_refresher);
new->lsa_refresher_started = monotime(NULL);
new->ibuf = stream_new(OSPF_MAX_PACKET_SIZE + 1);
@@ -368,6 +387,17 @@ static struct ospf *ospf_new(unsigned short instance, const char *name)
QOBJ_REG(new, ospf);
new->fd = -1;
+
+ return new;
+}
+
+/* Allocate new ospf structure. */
+static struct ospf *ospf_new(unsigned short instance, const char *name)
+{
+ struct ospf *new;
+
+ new = ospf_new_alloc(instance, name);
+
if ((ospf_sock_init(new)) < 0) {
if (new->vrf_id != VRF_UNKNOWN)
flog_err(
@@ -376,6 +406,12 @@ static struct ospf *ospf_new(unsigned short instance, const char *name)
__func__);
return new;
}
+
+ thread_add_timer(master, ospf_lsa_maxage_walker, new,
+ OSPF_LSA_MAXAGE_CHECK_INTERVAL, &new->t_maxage_walker);
+ thread_add_timer(master, ospf_lsa_refresh_walker, new,
+ new->lsa_refresh_interval, &new->t_lsa_refresher);
+
thread_add_read(master, ospf_read, new, new->fd, &new->t_read);
return new;
@@ -887,8 +923,7 @@ static void ospf_finish_final(struct ospf *ospf)
/* allocate new OSPF Area object */
-static struct ospf_area *ospf_area_new(struct ospf *ospf,
- struct in_addr area_id)
+struct ospf_area *ospf_area_new(struct ospf *ospf, struct in_addr area_id)
{
struct ospf_area *new;
@@ -1035,7 +1070,8 @@ void ospf_area_del_if(struct ospf_area *area, struct ospf_interface *oi)
}
-static void add_ospf_interface(struct connected *co, struct ospf_area *area)
+struct ospf_interface *add_ospf_interface(struct connected *co,
+ struct ospf_area *area)
{
struct ospf_interface *oi;
@@ -1072,6 +1108,8 @@ static void add_ospf_interface(struct connected *co, struct ospf_area *area)
if ((area->ospf->router_id.s_addr != INADDR_ANY)
&& if_is_operative(co->ifp))
ospf_if_up(oi);
+
+ return oi;
}
static void update_redistributed(struct ospf *ospf, int add_to_ospf)
@@ -1701,26 +1739,6 @@ int ospf_area_nssa_translator_role_set(struct ospf *ospf,
return 1;
}
-#if 0
-/* XXX: unused? Leave for symmetry? */
-static int
-ospf_area_nssa_translator_role_unset (struct ospf *ospf,
- struct in_addr area_id)
-{
- struct ospf_area *area;
-
- area = ospf_area_lookup_by_area_id (ospf, area_id);
- if (area == NULL)
- return 0;
-
- area->NSSATranslatorRole = OSPF_NSSA_ROLE_CANDIDATE;
-
- ospf_area_check_free (ospf, area_id);
-
- return 1;
-}
-#endif
-
int ospf_area_export_list_set(struct ospf *ospf, struct ospf_area *area,
const char *list_name)
{
@@ -1963,35 +1981,6 @@ struct ospf_nbr_nbma *ospf_nbr_nbma_lookup(struct ospf *ospf,
return NULL;
}
-struct ospf_nbr_nbma *ospf_nbr_nbma_lookup_next(struct ospf *ospf,
- struct in_addr *addr, int first)
-{
-#if 0
- struct ospf_nbr_nbma *nbr_nbma;
- struct listnode *node;
-#endif
-
- if (ospf == NULL)
- return NULL;
-
-#if 0
- for (ALL_LIST_ELEMENTS_RO (ospf->nbr_nbma, node, nbr_nbma))
- {
- if (first)
- {
- *addr = nbr_nbma->addr;
- return nbr_nbma;
- }
- else if (ntohl (nbr_nbma->addr.s_addr) > ntohl (addr->s_addr))
- {
- *addr = nbr_nbma->addr;
- return nbr_nbma;
- }
- }
-#endif
- return NULL;
-}
-
int ospf_nbr_nbma_set(struct ospf *ospf, struct in_addr nbr_addr)
{
struct ospf_nbr_nbma *nbr_nbma;
diff --git a/ospfd/ospfd.h b/ospfd/ospfd.h
index 6960d151c2..954a469b68 100644
--- a/ospfd/ospfd.h
+++ b/ospfd/ospfd.h
@@ -23,6 +23,7 @@
#define _ZEBRA_OSPFD_H
#include <zebra.h>
+#include "typesafe.h"
#include "qobj.h"
#include "libospf.h"
#include "ldp_sync.h"
@@ -126,6 +127,13 @@ enum {
OSPF_LOG_ADJACENCY_DETAIL = (1 << 4),
};
+/* TI-LFA */
+enum protection_type {
+ OSPF_TI_LFA_UNDEFINED_PROTECTION,
+ OSPF_TI_LFA_LINK_PROTECTION,
+ OSPF_TI_LFA_NODE_PROTECTION,
+};
+
/* OSPF instance structure. */
struct ospf {
/* OSPF's running state based on the '[no] router ospf [<instance>]'
@@ -374,10 +382,71 @@ struct ospf {
/* MPLS LDP-IGP Sync */
struct ldp_sync_info_cmd ldp_sync_cmd;
+ /* TI-LFA support for all interfaces. */
+ bool ti_lfa_enabled;
+ enum protection_type ti_lfa_protection_type;
+
QOBJ_FIELDS
};
DECLARE_QOBJ_TYPE(ospf)
+enum ospf_ti_lfa_p_q_space_adjacency {
+ OSPF_TI_LFA_P_Q_SPACE_ADJACENT,
+ OSPF_TI_LFA_P_Q_SPACE_NON_ADJACENT,
+};
+
+enum ospf_ti_lfa_node_type {
+ OSPF_TI_LFA_UNDEFINED_NODE,
+ OSPF_TI_LFA_PQ_NODE,
+ OSPF_TI_LFA_P_NODE,
+ OSPF_TI_LFA_Q_NODE,
+};
+
+struct ospf_ti_lfa_node_info {
+ struct vertex *node;
+ enum ospf_ti_lfa_node_type type;
+ struct in_addr nexthop;
+};
+
+struct ospf_ti_lfa_inner_backup_path_info {
+ struct ospf_ti_lfa_node_info p_node_info;
+ struct ospf_ti_lfa_node_info q_node_info;
+ struct mpls_label_stack *label_stack;
+};
+
+struct protected_resource {
+ enum protection_type type;
+
+ /* Link Protection */
+ struct router_lsa_link *link;
+
+ /* Node Protection */
+ struct in_addr router_id;
+};
+
+PREDECL_RBTREE_UNIQ(q_spaces)
+struct q_space {
+ struct vertex *root;
+ struct list *vertex_list;
+ struct mpls_label_stack *label_stack;
+ struct in_addr nexthop;
+ struct list *pc_path;
+ struct ospf_ti_lfa_node_info *p_node_info;
+ struct ospf_ti_lfa_node_info *q_node_info;
+ struct q_spaces_item q_spaces_item;
+};
+
+PREDECL_RBTREE_UNIQ(p_spaces)
+struct p_space {
+ struct vertex *root;
+ struct protected_resource *protected_resource;
+ struct q_spaces_head *q_spaces;
+ struct list *vertex_list;
+ struct vertex *pc_spf;
+ struct list *pc_vertex_list;
+ struct p_spaces_item p_spaces_item;
+};
+
/* OSPF area structure. */
struct ospf_area {
/* OSPF instance. */
@@ -475,6 +544,12 @@ struct ospf_area {
bool spf_root_node; /* flag for checking if the calculating node is the
root node of the SPF tree */
+ /* TI-LFA protected link for SPF calculations */
+ struct protected_resource *spf_protected_resource;
+
+ /* P/Q spaces for TI-LFA */
+ struct p_spaces_head *p_spaces;
+
/* Threads. */
struct thread *t_stub_router; /* Stub-router timer */
struct thread *t_opaque_lsa_self; /* Type-10 Opaque-LSAs origin. */
@@ -482,6 +557,9 @@ struct ospf_area {
/* Statistics field. */
uint32_t spf_calculation; /* SPF Calculation Count. */
+ /* reverse SPF (used for TI-LFA Q spaces) */
+ bool spf_reversed;
+
/* Time stamps. */
struct timeval ts_spf; /* SPF calculation time stamp. */
@@ -566,6 +644,7 @@ extern const char *ospf_redist_string(unsigned int route_type);
extern struct ospf *ospf_lookup_instance(unsigned short);
extern struct ospf *ospf_get(unsigned short instance, const char *name,
bool *created);
+extern struct ospf *ospf_new_alloc(unsigned short instance, const char *name);
extern struct ospf *ospf_get_instance(unsigned short, bool *created);
extern struct ospf *ospf_lookup_by_inst_name(unsigned short instance,
const char *name);
@@ -615,10 +694,10 @@ extern void ospf_terminate(void);
extern void ospf_nbr_nbma_if_update(struct ospf *, struct ospf_interface *);
extern struct ospf_nbr_nbma *ospf_nbr_nbma_lookup(struct ospf *,
struct in_addr);
-extern struct ospf_nbr_nbma *ospf_nbr_nbma_lookup_next(struct ospf *,
- struct in_addr *, int);
extern int ospf_oi_count(struct interface *);
+extern struct ospf_area *ospf_area_new(struct ospf *ospf,
+ struct in_addr area_id);
extern struct ospf_area *ospf_area_get(struct ospf *, struct in_addr);
extern void ospf_area_check_free(struct ospf *, struct in_addr);
extern struct ospf_area *ospf_area_lookup_by_area_id(struct ospf *,
@@ -640,4 +719,11 @@ const char *ospf_vrf_id_to_name(vrf_id_t vrf_id);
int ospf_area_nssa_no_summary_set(struct ospf *, struct in_addr);
const char *ospf_get_name(const struct ospf *ospf);
+extern struct ospf_interface *add_ospf_interface(struct connected *co,
+ struct ospf_area *area);
+
+extern int p_spaces_compare_func(const struct p_space *a,
+ const struct p_space *b);
+extern int q_spaces_compare_func(const struct q_space *a,
+ const struct q_space *b);
#endif /* _ZEBRA_OSPFD_H */
diff --git a/ospfd/subdir.am b/ospfd/subdir.am
index 1a807ea12b..28d58452df 100644
--- a/ospfd/subdir.am
+++ b/ospfd/subdir.am
@@ -52,6 +52,7 @@ ospfd_libfrrospf_a_SOURCES = \
ospfd/ospf_route.c \
ospfd/ospf_routemap.c \
ospfd/ospf_spf.c \
+ ospfd/ospf_ti_lfa.c \
ospfd/ospf_sr.c \
ospfd/ospf_te.c \
ospfd/ospf_vty.c \
@@ -100,6 +101,7 @@ noinst_HEADERS += \
ospfd/ospf_ri.h \
ospfd/ospf_route.h \
ospfd/ospf_spf.h \
+ ospfd/ospf_ti_lfa.h \
ospfd/ospf_sr.h \
ospfd/ospf_te.h \
ospfd/ospf_vty.h \
diff --git a/pbrd/pbr_map.c b/pbrd/pbr_map.c
index 01caff5b52..5b851988f6 100644
--- a/pbrd/pbr_map.c
+++ b/pbrd/pbr_map.c
@@ -304,7 +304,7 @@ static void pbrms_vrf_update(struct pbr_map_sequence *pbrms,
if (pbrms->vrf_lookup
&& (strncmp(vrf_name, pbrms->vrf_name, sizeof(pbrms->vrf_name))
== 0)) {
- DEBUGD(&pbr_dbg_map, "\tSeq %u uses vrf %s (%u), updating map",
+ DEBUGD(&pbr_dbg_map, " Seq %u uses vrf %s (%u), updating map",
pbrms->seqno, vrf_name, pbr_vrf_id(pbr_vrf));
pbr_map_check(pbrms, false);
@@ -666,7 +666,7 @@ void pbr_map_schedule_policy_from_nhg(const char *nh_group, bool installed)
RB_FOREACH (pbrm, pbr_map_entry_head, &pbr_maps) {
DEBUGD(&pbr_dbg_map, "%s: Looking at %s", __func__, pbrm->name);
for (ALL_LIST_ELEMENTS_RO(pbrm->seqnumbers, node, pbrms)) {
- DEBUGD(&pbr_dbg_map, "\tNH Grp name: %s",
+ DEBUGD(&pbr_dbg_map, " NH Grp name: %s",
pbrms->nhgrp_name ?
pbrms->nhgrp_name : pbrms->internal_nhg_name);
@@ -707,7 +707,7 @@ void pbr_map_policy_install(const char *name)
if (pbrm->valid && pbrms->nhs_installed
&& pbrm->incoming->count) {
- DEBUGD(&pbr_dbg_map, "\tInstalling %s %u", pbrm->name,
+ DEBUGD(&pbr_dbg_map, " Installing %s %u", pbrm->name,
pbrms->seqno);
for (ALL_LIST_ELEMENTS_RO(pbrm->incoming, inode, pmi))
if (pbr_map_interface_is_valid(pmi))
@@ -861,7 +861,7 @@ void pbr_map_check(struct pbr_map_sequence *pbrms, bool changed)
DEBUGD(&pbr_dbg_map, "%s: Installing %s(%u) reason: %" PRIu64,
__func__, pbrm->name, pbrms->seqno, pbrms->reason);
DEBUGD(&pbr_dbg_map,
- "\tSending PBR_MAP_POLICY_INSTALL event");
+ " Sending PBR_MAP_POLICY_INSTALL event");
} else {
install = false;
DEBUGD(&pbr_dbg_map, "%s: Removing %s(%u) reason: %" PRIu64,
diff --git a/pbrd/pbr_nht.c b/pbrd/pbr_nht.c
index dbe5de724c..ba9ad97ab8 100644
--- a/pbrd/pbr_nht.c
+++ b/pbrd/pbr_nht.c
@@ -213,7 +213,7 @@ void pbr_nhgroup_add_cb(const char *name)
nhgc = nhgc_find(name);
if (!nhgc) {
- DEBUGD(&pbr_dbg_nht, "%s: Could not find nhgc with name: %s\n",
+ DEBUGD(&pbr_dbg_nht, "%s: Could not find nhgc with name: %s",
__func__, name);
return;
}
@@ -320,13 +320,6 @@ void pbr_nhgroup_delete_cb(const char *name)
pbr_map_check_nh_group_change(name);
}
-#if 0
-static struct pbr_nexthop_cache *pbr_nht_lookup_nexthop(struct nexthop *nexthop)
-{
- return NULL;
-}
-#endif
-
static void
pbr_nht_find_nhg_from_table_update(struct pbr_nexthop_group_cache *pnhgc,
uint32_t table_id, bool installed)
@@ -627,7 +620,7 @@ struct pbr_nexthop_group_cache *pbr_nht_add_group(const char *name)
nhgc = nhgc_find(name);
if (!nhgc) {
- DEBUGD(&pbr_dbg_nht, "%s: Could not find nhgc with name: %s\n",
+ DEBUGD(&pbr_dbg_nht, "%s: Could not find nhgc with name: %s",
__func__, name);
return NULL;
}
@@ -693,7 +686,7 @@ bool pbr_nht_nexthop_group_valid(const char *name)
pnhgc = hash_get(pbr_nhg_hash, &lookup, NULL);
if (!pnhgc)
return false;
- DEBUGD(&pbr_dbg_nht, "%s: \t%d %d", __func__, pnhgc->valid,
+ DEBUGD(&pbr_dbg_nht, "%s: %d %d", __func__, pnhgc->valid,
pnhgc->installed);
if (pnhgc->valid && pnhgc->installed)
return true;
@@ -718,7 +711,6 @@ pbr_nht_individual_nexthop_gw_update(struct pbr_nexthop_cache *pnhc,
struct pbr_nht_individual *pnhi)
{
bool is_valid = pnhc->valid;
- bool all_done = false;
/*
* If we have an interface down event, let's note that
@@ -730,58 +722,36 @@ pbr_nht_individual_nexthop_gw_update(struct pbr_nexthop_cache *pnhc,
* interface event.
*/
if (!pnhi->nhr && pnhi->ifp) {
- struct connected *connected;
- struct listnode *node;
- struct prefix p;
-
switch (pnhc->nexthop.type) {
case NEXTHOP_TYPE_BLACKHOLE:
- all_done = true;
- break;
- case NEXTHOP_TYPE_IFINDEX:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- is_valid = if_is_up(pnhi->ifp);
- all_done = true;
- break;
case NEXTHOP_TYPE_IPV4:
- p.family = AF_INET;
- p.prefixlen = IPV4_MAX_BITLEN;
- p.u.prefix4 = pnhc->nexthop.gate.ipv4;
- break;
case NEXTHOP_TYPE_IPV6:
- p.family = AF_INET6;
- p.prefixlen = IPV6_MAX_BITLEN;
- memcpy(&p.u.prefix6, &pnhc->nexthop.gate.ipv6,
- sizeof(struct in6_addr));
- break;
- }
-
- /* Early exit in a couple of cases. */
- if (all_done)
goto done;
-
- FOR_ALL_INTERFACES_ADDRESSES (pnhi->ifp, connected, node) {
- if (prefix_match(connected->address, &p)) {
+ case NEXTHOP_TYPE_IFINDEX:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ if (pnhc->nexthop.ifindex == pnhi->ifp->ifindex)
is_valid = if_is_up(pnhi->ifp);
- break;
- }
+ goto done;
}
+
goto done;
}
- switch (pnhi->nhr->prefix.family) {
- case AF_INET:
- if (pnhc->nexthop.gate.ipv4.s_addr
- != pnhi->nhr->prefix.u.prefix4.s_addr)
- goto done; /* Unrelated change */
- break;
- case AF_INET6:
- if (memcmp(&pnhc->nexthop.gate.ipv6,
- &pnhi->nhr->prefix.u.prefix6, 16)
- != 0)
- goto done; /* Unrelated change */
- break;
+ if (pnhi->nhr) {
+ switch (pnhi->nhr->prefix.family) {
+ case AF_INET:
+ if (pnhc->nexthop.gate.ipv4.s_addr
+ != pnhi->nhr->prefix.u.prefix4.s_addr)
+ goto done; /* Unrelated change */
+ break;
+ case AF_INET6:
+ if (memcmp(&pnhc->nexthop.gate.ipv6,
+ &pnhi->nhr->prefix.u.prefix6, 16)
+ != 0)
+ goto done; /* Unrelated change */
+ break;
+ }
}
pnhi->nhr_matched = true;
@@ -881,7 +851,7 @@ static void pbr_nht_individual_nexthop_update_lookup(struct hash_bucket *b,
pbr_nht_individual_nexthop_update(pnhc, pnhi);
- DEBUGD(&pbr_dbg_nht, "\tFound %pFX: old: %d new: %d",
+ DEBUGD(&pbr_dbg_nht, " Found %pFX: old: %d new: %d",
&pnhi->nhr->prefix, old_valid, pnhc->valid);
if (pnhc->valid)
@@ -1132,7 +1102,7 @@ pbr_nht_individual_nexthop_interface_update_lookup(struct hash_bucket *b,
pbr_nht_individual_nexthop_update(pnhc, pnhi);
- DEBUGD(&pbr_dbg_nht, "\tFound %s: old: %d new: %d", pnhi->ifp->name,
+ DEBUGD(&pbr_dbg_nht, " Found %s: old: %d new: %d", pnhi->ifp->name,
old_valid, pnhc->valid);
if (pnhc->valid)
diff --git a/pbrd/pbr_zebra.c b/pbrd/pbr_zebra.c
index 222a10e751..467bbc8f72 100644
--- a/pbrd/pbr_zebra.c
+++ b/pbrd/pbr_zebra.c
@@ -266,7 +266,7 @@ static void route_add_helper(struct zapi_route *api, struct nexthop_group nhg,
api->prefix.family = install_afi;
- DEBUGD(&pbr_dbg_zebra, "\tEncoding %pFX", &api->prefix);
+ DEBUGD(&pbr_dbg_zebra, " Encoding %pFX", &api->prefix);
i = 0;
for (ALL_NEXTHOPS(nhg, nhop)) {
@@ -409,12 +409,12 @@ static int pbr_zebra_nexthop_update(ZAPI_CALLBACK_ARGS)
DEBUGD(&pbr_dbg_zebra, "%s: Received Nexthop update: %pFX",
__func__, &nhr.prefix);
- DEBUGD(&pbr_dbg_zebra, "%s: (\tNexthops(%u)", __func__,
+ DEBUGD(&pbr_dbg_zebra, "%s: (Nexthops(%u)", __func__,
nhr.nexthop_num);
for (i = 0; i < nhr.nexthop_num; i++) {
DEBUGD(&pbr_dbg_zebra,
- "%s: \tType: %d: vrf: %d, ifindex: %d gate: %pI4",
+ "%s: Type: %d: vrf: %d, ifindex: %d gate: %pI4",
__func__, nhr.nexthops[i].type,
nhr.nexthops[i].vrf_id, nhr.nexthops[i].ifindex,
&nhr.nexthops[i].gate.ipv4);
@@ -585,7 +585,7 @@ bool pbr_send_pbr_map(struct pbr_map_sequence *pbrms,
*/
stream_putl(s, 1);
- DEBUGD(&pbr_dbg_zebra, "%s: \t%s %s seq %u %d %s %u", __func__,
+ DEBUGD(&pbr_dbg_zebra, "%s: %s %s seq %u %d %s %u", __func__,
install ? "Installing" : "Deleting", pbrm->name, pbrms->seqno,
install, pmi->ifp->name, pmi->delete);
diff --git a/pimd/pim_bfd.c b/pimd/pim_bfd.c
index 1d653cdc3f..5e1b9a69e1 100644
--- a/pimd/pim_bfd.c
+++ b/pimd/pim_bfd.c
@@ -217,7 +217,7 @@ static int pim_bfd_interface_dest_update(ZAPI_CALLBACK_ARGS)
{
struct interface *ifp = NULL;
struct pim_interface *pim_ifp = NULL;
- struct prefix p;
+ struct prefix p, src_p;
int status;
char msg[100];
int old_status;
@@ -227,8 +227,8 @@ static int pim_bfd_interface_dest_update(ZAPI_CALLBACK_ARGS)
struct listnode *neigh_nextnode = NULL;
struct pim_neighbor *neigh = NULL;
- ifp = bfd_get_peer_info(zclient->ibuf, &p, NULL, &status,
- NULL, vrf_id);
+ ifp = bfd_get_peer_info(zclient->ibuf, &p, &src_p, &status, NULL,
+ vrf_id);
if ((ifp == NULL) || (p.family != AF_INET))
return 0;
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index ff85151839..714d6e8e1d 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -795,15 +795,26 @@ static void igmp_show_interfaces_single(struct pim_instance *pim,
}
}
-static void igmp_show_interface_join(struct pim_instance *pim, struct vty *vty)
+static void igmp_show_interface_join(struct pim_instance *pim, struct vty *vty,
+ bool uj)
{
struct interface *ifp;
time_t now;
+ json_object *json = NULL;
+ json_object *json_iface = NULL;
+ json_object *json_grp = NULL;
+ json_object *json_grp_arr = NULL;
now = pim_time_monotonic_sec();
- vty_out(vty,
- "Interface Address Source Group Socket Uptime \n");
+ if (uj) {
+ json = json_object_new_object();
+ json_object_string_add(json, "vrf",
+ vrf_id_to_name(pim->vrf_id));
+ } else {
+ vty_out(vty,
+ "Interface Address Source Group Socket Uptime \n");
+ }
FOR_ALL_INTERFACES (pim->vrf, ifp) {
struct pim_interface *pim_ifp;
@@ -837,12 +848,49 @@ static void igmp_show_interface_join(struct pim_instance *pim, struct vty *vty)
pim_inet4_dump("<src?>", ij->source_addr, source_str,
sizeof(source_str));
- vty_out(vty, "%-16s %-15s %-15s %-15s %6d %8s\n",
- ifp->name, pri_addr_str, source_str, group_str,
- ij->sock_fd, uptime);
+ if (uj) {
+ json_object_object_get_ex(json, ifp->name,
+ &json_iface);
+
+ if (!json_iface) {
+ json_iface = json_object_new_object();
+ json_object_string_add(
+ json_iface, "name", ifp->name);
+ json_object_object_add(json, ifp->name,
+ json_iface);
+ json_grp_arr = json_object_new_array();
+ json_object_object_add(json_iface,
+ "groups",
+ json_grp_arr);
+ }
+
+ json_grp = json_object_new_object();
+ json_object_string_add(json_grp, "source",
+ source_str);
+ json_object_string_add(json_grp, "group",
+ group_str);
+ json_object_string_add(json_grp, "primaryAddr",
+ pri_addr_str);
+ json_object_int_add(json_grp, "sockFd",
+ ij->sock_fd);
+ json_object_string_add(json_grp, "upTime",
+ uptime);
+ json_object_array_add(json_grp_arr, json_grp);
+ } else {
+ vty_out(vty,
+ "%-16s %-15s %-15s %-15s %6d %8s\n",
+ ifp->name, pri_addr_str, source_str,
+ group_str, ij->sock_fd, uptime);
+ }
} /* for (pim_ifp->igmp_join_list) */
} /* for (iflist) */
+
+ if (uj) {
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
}
static void pim_show_interfaces_single(struct pim_instance *pim,
@@ -4217,32 +4265,35 @@ DEFUN (show_ip_igmp_interface_vrf_all,
DEFUN (show_ip_igmp_join,
show_ip_igmp_join_cmd,
- "show ip igmp [vrf NAME] join",
+ "show ip igmp [vrf NAME] join [json]",
SHOW_STR
IP_STR
IGMP_STR
VRF_CMD_HELP_STR
- "IGMP static join information\n")
+ "IGMP static join information\n"
+ JSON_STR)
{
int idx = 2;
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+ bool uj = use_json(argc, argv);
if (!vrf)
return CMD_WARNING;
- igmp_show_interface_join(vrf->info, vty);
+ igmp_show_interface_join(vrf->info, vty, uj);
return CMD_SUCCESS;
}
DEFUN (show_ip_igmp_join_vrf_all,
show_ip_igmp_join_vrf_all_cmd,
- "show ip igmp vrf all join",
+ "show ip igmp vrf all join [json]",
SHOW_STR
IP_STR
IGMP_STR
VRF_CMD_HELP_STR
- "IGMP static join information\n")
+ "IGMP static join information\n"
+ JSON_STR)
{
bool uj = use_json(argc, argv);
struct vrf *vrf;
@@ -4258,7 +4309,7 @@ DEFUN (show_ip_igmp_join_vrf_all,
first = false;
} else
vty_out(vty, "VRF: %s\n", vrf->name);
- igmp_show_interface_join(vrf->info, vty);
+ igmp_show_interface_join(vrf->info, vty, uj);
}
if (uj)
vty_out(vty, "}\n");
@@ -11022,9 +11073,9 @@ static void pim_show_vxlan_sg_entry(struct pim_vxlan_sg *vxlan_sg,
}
}
-static void pim_show_vxlan_sg_hash_entry(struct hash_bucket *backet, void *arg)
+static void pim_show_vxlan_sg_hash_entry(struct hash_bucket *bucket, void *arg)
{
- pim_show_vxlan_sg_entry((struct pim_vxlan_sg *)backet->data,
+ pim_show_vxlan_sg_entry((struct pim_vxlan_sg *)bucket->data,
(struct pim_sg_cache_walk_data *)arg);
}
diff --git a/pimd/pim_hello.c b/pimd/pim_hello.c
index e50504ec10..6f5c4174e2 100644
--- a/pimd/pim_hello.c
+++ b/pimd/pim_hello.c
@@ -95,22 +95,6 @@ static void tlv_trace_uint32_hex(const char *label, const char *tlv_name,
}
}
-#if 0
-static void tlv_trace(const char *label, const char *tlv_name,
- const char *ifname, struct in_addr src_addr,
- int isset)
-{
- if (isset) {
- char src_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<src?>", src_addr, src_str, sizeof(src_str));
- zlog_debug("%s: PIM hello option from %s on interface %s: %s",
- label,
- src_str, ifname,
- tlv_name);
- }
-}
-#endif
-
static void tlv_trace_list(const char *label, const char *tlv_name,
const char *ifname, struct in_addr src_addr,
int isset, struct list *addr_list)
diff --git a/pimd/pim_ifchannel.c b/pimd/pim_ifchannel.c
index fc0f514a49..cdaf7bcdd4 100644
--- a/pimd/pim_ifchannel.c
+++ b/pimd/pim_ifchannel.c
@@ -729,6 +729,21 @@ static int on_ifjoin_prune_pending_timer(struct thread *t)
pim_jp_agg_single_upstream_send(&parent->rpf,
parent, true);
+ /*
+ * SGRpt prune pending expiry has to install
+ * SG entry with an empty olist to drop the SG
+ * traffic in case no other intf exists.
+ * In that scenario, the SG entry wouldn't have
+ * been installed until the Prune pending timer
+ * expired. So install now.
+ */
+ pim_channel_del_oif(
+ ch->upstream->channel_oil, ifp,
+ PIM_OIF_FLAG_PROTO_STAR, __func__);
+ if (!ch->upstream->channel_oil->installed)
+ pim_upstream_mroute_add(
+ ch->upstream->channel_oil,
+ __PRETTY_FUNCTION__);
}
}
/* from here ch may have been deleted */
@@ -1113,6 +1128,24 @@ void pim_ifchannel_prune(struct interface *ifp, struct in_addr upstream,
case PIM_IFJOIN_PRUNE:
if (source_flags & PIM_ENCODE_RPT_BIT) {
THREAD_OFF(ch->t_ifjoin_prune_pending_timer);
+ /*
+ * While in Prune State, Receive SGRpt Prune.
+ * RFC 7761 Sec 4.5.3:
+ * The (S,G,rpt) downstream state machine on interface I
+ * remains in Prune state. The Expiry Timer (ET) is
+ * restarted and is then set to the maximum of its
+ * current value and the HoldTime from the triggering
+ * Join/Prune message.
+ */
+ if (ch->t_ifjoin_expiry_timer) {
+ unsigned long rem = thread_timer_remain_second(
+ ch->t_ifjoin_expiry_timer);
+
+ if (rem > holdtime)
+ return;
+ THREAD_OFF(ch->t_ifjoin_expiry_timer);
+ }
+
thread_add_timer(router->master, on_ifjoin_expiry_timer,
ch, holdtime,
&ch->t_ifjoin_expiry_timer);
diff --git a/pimd/pim_instance.c b/pimd/pim_instance.c
index b7e49078ef..019048abf1 100644
--- a/pimd/pim_instance.c
+++ b/pimd/pim_instance.c
@@ -71,6 +71,8 @@ static void pim_instance_terminate(struct pim_instance *pim)
XFREE(MTYPE_PIM_PLIST_NAME, pim->spt.plist);
XFREE(MTYPE_PIM_PLIST_NAME, pim->register_plist);
+
+ pim->vrf = NULL;
XFREE(MTYPE_PIM_PIM_INSTANCE, pim);
}
@@ -153,10 +155,16 @@ static int pim_vrf_delete(struct vrf *vrf)
{
struct pim_instance *pim = vrf->info;
+ if (!pim)
+ return 0;
+
zlog_debug("VRF Deletion: %s(%u)", vrf->name, vrf->vrf_id);
pim_ssmpingd_destroy(pim);
pim_instance_terminate(pim);
+
+ vrf->info = NULL;
+
return 0;
}
diff --git a/pimd/pim_jp_agg.c b/pimd/pim_jp_agg.c
index 5279a00855..d95d9dd25d 100644
--- a/pimd/pim_jp_agg.c
+++ b/pimd/pim_jp_agg.c
@@ -360,11 +360,9 @@ void pim_jp_agg_switch_interface(struct pim_rpf *orpf, struct pim_rpf *nrpf,
void pim_jp_agg_single_upstream_send(struct pim_rpf *rpf,
struct pim_upstream *up, bool is_join)
{
- static struct list *groups = NULL;
- static struct pim_jp_agg_group jag;
- static struct pim_jp_sources js;
-
- static bool first = true;
+ struct list groups, sources;
+ struct pim_jp_agg_group jag;
+ struct pim_jp_sources js;
/* skip JP upstream messages if source is directly connected */
if (!up || !rpf->source_nexthop.interface ||
@@ -373,19 +371,19 @@ void pim_jp_agg_single_upstream_send(struct pim_rpf *rpf,
if_is_loopback_or_vrf(rpf->source_nexthop.interface))
return;
- if (first) {
- groups = list_new();
- jag.sources = list_new();
-
- listnode_add(groups, &jag);
- listnode_add(jag.sources, &js);
+ memset(&groups, 0, sizeof(groups));
+ memset(&sources, 0, sizeof(sources));
+ jag.sources = &sources;
- first = false;
- }
+ listnode_add(&groups, &jag);
+ listnode_add(jag.sources, &js);
jag.group.s_addr = up->sg.grp.s_addr;
js.up = up;
js.is_join = is_join;
- pim_joinprune_send(rpf, groups);
+ pim_joinprune_send(rpf, &groups);
+
+ list_delete_all_node(jag.sources);
+ list_delete_all_node(&groups);
}
diff --git a/pimd/pim_mroute.c b/pimd/pim_mroute.c
index 23259900b7..afd38face3 100644
--- a/pimd/pim_mroute.c
+++ b/pimd/pim_mroute.c
@@ -1221,10 +1221,9 @@ void pim_mroute_update_counters(struct channel_oil *c_oil)
sg.src = c_oil->oil.mfcc_origin;
sg.grp = c_oil->oil.mfcc_mcastgrp;
- if (PIM_DEBUG_MROUTE)
- zlog_debug(
- "Channel%s is not installed no need to collect data from kernel",
- pim_str_sg_dump(&sg));
+ zlog_debug(
+ "Channel%s is not installed no need to collect data from kernel",
+ pim_str_sg_dump(&sg));
}
return;
}
diff --git a/pimd/pim_msdp_socket.c b/pimd/pim_msdp_socket.c
index 7620cd5792..ddd8dc6bf9 100644
--- a/pimd/pim_msdp_socket.c
+++ b/pimd/pim_msdp_socket.c
@@ -44,7 +44,7 @@ static void pim_msdp_update_sock_send_buffer_size(int fd)
if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &optval, &optlen) < 0) {
flog_err_sys(EC_LIB_SOCKET,
- "getsockopt of SO_SNDBUF failed %s\n",
+ "getsockopt of SO_SNDBUF failed %s",
safe_strerror(errno));
return;
}
@@ -53,7 +53,7 @@ static void pim_msdp_update_sock_send_buffer_size(int fd)
if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &size, sizeof(size))
< 0) {
flog_err_sys(EC_LIB_SOCKET,
- "Couldn't increase send buffer: %s\n",
+ "Couldn't increase send buffer: %s",
safe_strerror(errno));
}
}
diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c
index ba044de2f8..4bc78529a8 100644
--- a/pimd/pim_nb_config.c
+++ b/pimd/pim_nb_config.c
@@ -545,7 +545,7 @@ static int pim_cmd_igmp_start(struct interface *ifp)
pim_ifp = ifp->info;
if (!pim_ifp) {
- (void)pim_if_new(ifp, true, false, false, false);
+ pim_ifp = pim_if_new(ifp, true, false, false, false);
need_startup = 1;
} else {
if (!PIM_IF_TEST_IGMP(pim_ifp->options)) {
@@ -553,6 +553,7 @@ static int pim_cmd_igmp_start(struct interface *ifp)
need_startup = 1;
}
}
+ pim_if_create_pimreg(pim_ifp->pim);
/* 'ip igmp' executed multiple times, with need_startup
* avoid multiple if add all and membership refresh
diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c
index 301a27001f..dbba6b66d8 100644
--- a/pimd/pim_rp.c
+++ b/pimd/pim_rp.c
@@ -242,7 +242,7 @@ struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
if (!rn) {
flog_err(
EC_LIB_DEVELOPMENT,
- "%s: BUG We should have found default group information\n",
+ "%s: BUG We should have found default group information",
__func__);
return best;
}
diff --git a/pimd/pim_vxlan.c b/pimd/pim_vxlan.c
index 380c97a97c..6a12c7fb13 100644
--- a/pimd/pim_vxlan.c
+++ b/pimd/pim_vxlan.c
@@ -497,10 +497,10 @@ static void pim_vxlan_orig_mr_del(struct pim_vxlan_sg *vxlan_sg)
pim_vxlan_orig_mr_up_del(vxlan_sg);
}
-static void pim_vxlan_orig_mr_iif_update(struct hash_bucket *backet, void *arg)
+static void pim_vxlan_orig_mr_iif_update(struct hash_bucket *bucket, void *arg)
{
struct interface *ifp;
- struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)backet->data;
+ struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)bucket->data;
struct interface *old_iif = vxlan_sg->iif;
if (!pim_vxlan_is_orig_mroute(vxlan_sg))
@@ -812,11 +812,11 @@ bool pim_vxlan_do_mlag_reg(void)
* to the MLAG peer which may mroute it over the underlay if there are any
* interested receivers.
*/
-static void pim_vxlan_sg_peerlink_oif_update(struct hash_bucket *backet,
+static void pim_vxlan_sg_peerlink_oif_update(struct hash_bucket *bucket,
void *arg)
{
struct interface *new_oif = (struct interface *)arg;
- struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)backet->data;
+ struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)bucket->data;
if (!pim_vxlan_is_orig_mroute(vxlan_sg))
return;
@@ -950,10 +950,10 @@ static void pim_vxlan_up_cost_update(struct pim_instance *pim,
}
}
-static void pim_vxlan_term_mr_cost_update(struct hash_bucket *backet, void *arg)
+static void pim_vxlan_term_mr_cost_update(struct hash_bucket *bucket, void *arg)
{
struct interface *old_peerlink_rif = (struct interface *)arg;
- struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)backet->data;
+ struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)bucket->data;
struct pim_upstream *up;
struct listnode *listnode;
struct pim_upstream *child;
@@ -975,11 +975,11 @@ static void pim_vxlan_term_mr_cost_update(struct hash_bucket *backet, void *arg)
old_peerlink_rif);
}
-static void pim_vxlan_sg_peerlink_rif_update(struct hash_bucket *backet,
+static void pim_vxlan_sg_peerlink_rif_update(struct hash_bucket *bucket,
void *arg)
{
- pim_vxlan_orig_mr_iif_update(backet, NULL);
- pim_vxlan_term_mr_cost_update(backet, arg);
+ pim_vxlan_orig_mr_iif_update(bucket, NULL);
+ pim_vxlan_term_mr_cost_update(bucket, arg);
}
static void pim_vxlan_set_peerlink_rif(struct pim_instance *pim,
@@ -1032,10 +1032,10 @@ static void pim_vxlan_set_peerlink_rif(struct pim_instance *pim,
}
}
-static void pim_vxlan_term_mr_oif_update(struct hash_bucket *backet, void *arg)
+static void pim_vxlan_term_mr_oif_update(struct hash_bucket *bucket, void *arg)
{
struct interface *ifp = (struct interface *)arg;
- struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)backet->data;
+ struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)bucket->data;
if (pim_vxlan_is_orig_mroute(vxlan_sg))
return;
diff --git a/pimd/test_igmpv3_join.c b/pimd/test_igmpv3_join.c
index bf44f3c94a..3c26517e88 100644
--- a/pimd/test_igmpv3_join.c
+++ b/pimd/test_igmpv3_join.c
@@ -54,11 +54,6 @@ static int iface_solve_index(const char *ifname)
}
for (i = 0; ini[i].if_index; ++i) {
-#if 0
- fprintf(stderr,
- "%s: interface=%s matching against local ifname=%s ifindex=%d\n",
- prog_name, ifname, ini[i].if_name, ini[i].if_index);
-#endif
if (!strcmp(ini[i].if_name, ifname)) {
ifindex = ini[i].if_index;
break;
diff --git a/ripd/rip_interface.c b/ripd/rip_interface.c
index c601ab4047..89e7e5dc17 100644
--- a/ripd/rip_interface.c
+++ b/ripd/rip_interface.c
@@ -208,39 +208,6 @@ static void rip_request_interface(struct interface *ifp)
rip_request_interface_send(ifp, RIPv2);
}
-#if 0
-/* Send RIP request to the neighbor. */
-static void
-rip_request_neighbor (struct in_addr addr)
-{
- struct sockaddr_in to;
-
- memset (&to, 0, sizeof(struct sockaddr_in));
- to.sin_port = htons (RIP_PORT_DEFAULT);
- to.sin_addr = addr;
-
- rip_request_send (&to, NULL, rip->version_send, NULL);
-}
-
-/* Request routes at all interfaces. */
-static void
-rip_request_neighbor_all (void)
-{
- struct route_node *rp;
-
- if (! rip)
- return;
-
- if (IS_RIP_DEBUG_EVENT)
- zlog_debug ("request to the all neighbor");
-
- /* Send request to all neighbor. */
- for (rp = route_top (rip->neighbor); rp; rp = route_next (rp))
- if (rp->info)
- rip_request_neighbor (rp->p.u.prefix4);
-}
-#endif
-
/* Multicast packet receive socket. */
static int rip_multicast_join(struct interface *ifp, int sock)
{
diff --git a/ripd/rip_snmp.c b/ripd/rip_snmp.c
index be222c7a5f..4e6ed1400f 100644
--- a/ripd/rip_snmp.c
+++ b/ripd/rip_snmp.c
@@ -547,18 +547,7 @@ static uint8_t *rip2PeerTable(struct variable *v, oid name[], size_t *length,
return (uint8_t *)&domain;
case RIP2PEERLASTUPDATE:
-#if 0
- /* We don't know the SNMP agent startup time. We have two choices here:
- * - assume ripd startup time equals SNMP agent startup time
- * - don't support this variable, at all
- * Currently, we do the latter...
- */
- *val_len = sizeof(time_t);
- uptime = peer->uptime; /* now - snmp_agent_startup - peer->uptime */
- return (uint8_t *) &uptime;
-#else
return (uint8_t *)NULL;
-#endif
case RIP2PEERVERSION:
*val_len = sizeof(int);
diff --git a/ripd/ripd.c b/ripd/ripd.c
index 82dd401f96..a276dedec8 100644
--- a/ripd/ripd.c
+++ b/ripd/ripd.c
@@ -2858,23 +2858,6 @@ void rip_event(struct rip *rip, enum rip_event event, int sock)
}
}
-#if 0
-static void
-rip_update_default_metric (void)
-{
- struct route_node *np;
- struct rip_info *rinfo = NULL;
- struct list *list = NULL;
- struct listnode *listnode = NULL;
-
- for (np = route_top (rip->table); np; np = route_next (np))
- if ((list = np->info) != NULL)
- for (ALL_LIST_ELEMENTS_RO (list, listnode, rinfo))
- if (rinfo->type != ZEBRA_ROUTE_RIP && rinfo->type != ZEBRA_ROUTE_CONNECT)
- rinfo->metric = rip->default_metric;
-}
-#endif
-
struct rip_distance *rip_distance_new(void)
{
return XCALLOC(MTYPE_RIP_DISTANCE, sizeof(struct rip_distance));
diff --git a/ripngd/ripngd.c b/ripngd/ripngd.c
index 1ebdae43fb..a9f570598f 100644
--- a/ripngd/ripngd.c
+++ b/ripngd/ripngd.c
@@ -2217,136 +2217,6 @@ DEFUN (show_ipv6_ripng_status,
return CMD_SUCCESS;
}
-#if 0
-/* RIPng update timer setup. */
-DEFUN (ripng_update_timer,
- ripng_update_timer_cmd,
- "update-timer SECOND",
- "Set RIPng update timer in seconds\n"
- "Seconds\n")
-{
- unsigned long update;
- char *endptr = NULL;
-
- update = strtoul (argv[0], &endptr, 10);
- if (update == ULONG_MAX || *endptr != '\0')
- {
- vty_out (vty, "update timer value error\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- ripng->update_time = update;
-
- ripng_event (RIPNG_UPDATE_EVENT, 0);
- return CMD_SUCCESS;
-}
-
-DEFUN (no_ripng_update_timer,
- no_ripng_update_timer_cmd,
- "no update-timer SECOND",
- NO_STR
- "Unset RIPng update timer in seconds\n"
- "Seconds\n")
-{
- ripng->update_time = RIPNG_UPDATE_TIMER_DEFAULT;
- ripng_event (RIPNG_UPDATE_EVENT, 0);
- return CMD_SUCCESS;
-}
-
-/* RIPng timeout timer setup. */
-DEFUN (ripng_timeout_timer,
- ripng_timeout_timer_cmd,
- "timeout-timer SECOND",
- "Set RIPng timeout timer in seconds\n"
- "Seconds\n")
-{
- unsigned long timeout;
- char *endptr = NULL;
-
- timeout = strtoul (argv[0], &endptr, 10);
- if (timeout == ULONG_MAX || *endptr != '\0')
- {
- vty_out (vty, "timeout timer value error\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- ripng->timeout_time = timeout;
-
- return CMD_SUCCESS;
-}
-
-DEFUN (no_ripng_timeout_timer,
- no_ripng_timeout_timer_cmd,
- "no timeout-timer SECOND",
- NO_STR
- "Unset RIPng timeout timer in seconds\n"
- "Seconds\n")
-{
- ripng->timeout_time = RIPNG_TIMEOUT_TIMER_DEFAULT;
- return CMD_SUCCESS;
-}
-
-/* RIPng garbage timer setup. */
-DEFUN (ripng_garbage_timer,
- ripng_garbage_timer_cmd,
- "garbage-timer SECOND",
- "Set RIPng garbage timer in seconds\n"
- "Seconds\n")
-{
- unsigned long garbage;
- char *endptr = NULL;
-
- garbage = strtoul (argv[0], &endptr, 10);
- if (garbage == ULONG_MAX || *endptr != '\0')
- {
- vty_out (vty, "garbage timer value error\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- ripng->garbage_time = garbage;
-
- return CMD_SUCCESS;
-}
-
-DEFUN (no_ripng_garbage_timer,
- no_ripng_garbage_timer_cmd,
- "no garbage-timer SECOND",
- NO_STR
- "Unset RIPng garbage timer in seconds\n"
- "Seconds\n")
-{
- ripng->garbage_time = RIPNG_GARBAGE_TIMER_DEFAULT;
- return CMD_SUCCESS;
-}
-#endif /* 0 */
-
-#if 0
-DEFUN (show_ipv6_protocols,
- show_ipv6_protocols_cmd,
- "show ipv6 protocols",
- SHOW_STR
- IPV6_STR
- "Routing protocol information\n")
-{
- if (! ripng)
- return CMD_SUCCESS;
-
- vty_out (vty, "Routing Protocol is \"ripng\"\n");
-
- vty_out (vty, "Sending updates every %ld seconds, next due in %d seconds\n",
- ripng->update_time, 0);
-
- vty_out (vty, "Timerout after %ld seconds, garbage correct %ld\n",
- ripng->timeout_time,
- ripng->garbage_time);
-
- vty_out (vty, "Outgoing update filter list for all interfaces is not set");
- vty_out (vty, "Incoming update filter list for all interfaces is not set");
-
- return CMD_SUCCESS;
-}
-#endif
-
/* Update ECMP routes to zebra when ECMP is disabled. */
void ripng_ecmp_disable(struct ripng *ripng)
{
@@ -2847,16 +2717,6 @@ void ripng_init(void)
install_default(RIPNG_NODE);
-#if 0
- install_element (VIEW_NODE, &show_ipv6_protocols_cmd);
- install_element (RIPNG_NODE, &ripng_update_timer_cmd);
- install_element (RIPNG_NODE, &no_ripng_update_timer_cmd);
- install_element (RIPNG_NODE, &ripng_timeout_timer_cmd);
- install_element (RIPNG_NODE, &no_ripng_timeout_timer_cmd);
- install_element (RIPNG_NODE, &ripng_garbage_timer_cmd);
- install_element (RIPNG_NODE, &no_ripng_garbage_timer_cmd);
-#endif /* 0 */
-
ripng_if_init();
ripng_debug_init();
diff --git a/ripngd/ripngd.h b/ripngd/ripngd.h
index a42c32ebb7..14ac29b3fe 100644
--- a/ripngd/ripngd.h
+++ b/ripngd/ripngd.h
@@ -230,35 +230,6 @@ struct ripng_info {
struct agg_node *rp;
};
-#ifdef notyet
-#if 0
-/* RIPng tag structure. */
-struct ripng_tag
-{
- /* Tag value. */
- uint16_t tag;
-
- /* Port. */
- uint16_t port;
-
- /* Multicast group. */
- struct in6_addr maddr;
-
- /* Table number. */
- int table;
-
- /* Distance. */
- int distance;
-
- /* Split horizon. */
- uint8_t split_horizon;
-
- /* Poison reverse. */
- uint8_t poison_reverse;
-};
-#endif /* 0 */
-#endif /* not yet */
-
typedef enum {
RIPNG_NO_SPLIT_HORIZON = 0,
RIPNG_SPLIT_HORIZON,
@@ -294,13 +265,6 @@ struct ripng_interface {
/* Route-map. */
struct route_map *routemap[RIPNG_FILTER_MAX];
-#ifdef notyet
-#if 0
- /* RIPng tag configuration. */
- struct ripng_tag *rtag;
-#endif /* 0 */
-#endif /* notyet */
-
/* Default information originate. */
uint8_t default_originate;
diff --git a/sharpd/sharp_zebra.c b/sharpd/sharp_zebra.c
index 4445bc0132..fed732b843 100644
--- a/sharpd/sharp_zebra.c
+++ b/sharpd/sharp_zebra.c
@@ -539,6 +539,7 @@ void nhg_add(uint32_t id, const struct nexthop_group *nhg,
struct zapi_nhg api_nhg = {};
struct zapi_nexthop *api_nh;
struct nexthop *nh;
+ bool is_valid = true;
api_nhg.id = id;
for (ALL_NEXTHOPS_PTR(nhg, nh)) {
@@ -549,12 +550,25 @@ void nhg_add(uint32_t id, const struct nexthop_group *nhg,
break;
}
+ /* Unresolved nexthops will lead to failure - only send
+ * nexthops that zebra will consider valid.
+ */
+ if (nh->ifindex == 0)
+ continue;
+
api_nh = &api_nhg.nexthops[api_nhg.nexthop_num];
zapi_nexthop_from_nexthop(api_nh, nh);
api_nhg.nexthop_num++;
}
+ if (api_nhg.nexthop_num == 0) {
+ zlog_debug("%s: nhg %u not sent: no valid nexthops",
+ __func__, id);
+ is_valid = false;
+ goto done;
+ }
+
if (backup_nhg) {
for (ALL_NEXTHOPS_PTR(backup_nhg, nh)) {
if (api_nhg.backup_nexthop_num >= MULTIPATH_NUM) {
@@ -563,6 +577,20 @@ void nhg_add(uint32_t id, const struct nexthop_group *nhg,
__func__);
break;
}
+
+ /* Unresolved nexthop: will be rejected by zebra.
+ * That causes a problem, since the primary nexthops
+ * rely on array indexing into the backup nexthops. If
+ * that array isn't valid, the backup indexes won't be
+ * valid.
+ */
+ if (nh->ifindex == 0) {
+ zlog_debug("%s: nhg %u: invalid backup nexthop",
+ __func__, id);
+ is_valid = false;
+ break;
+ }
+
api_nh = &api_nhg.backup_nexthops
[api_nhg.backup_nexthop_num];
@@ -571,7 +599,9 @@ void nhg_add(uint32_t id, const struct nexthop_group *nhg,
}
}
- zclient_nhg_send(zclient, ZEBRA_NHG_ADD, &api_nhg);
+done:
+ if (is_valid)
+ zclient_nhg_send(zclient, ZEBRA_NHG_ADD, &api_nhg);
}
void nhg_del(uint32_t id)
diff --git a/staticd/static_nb.c b/staticd/static_nb.c
index 2fdd0d2989..a2a14751cf 100644
--- a/staticd/static_nb.c
+++ b/staticd/static_nb.c
@@ -66,7 +66,6 @@ const struct frr_yang_module_info frr_staticd_info = {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/path-list/frr-nexthops/nexthop/onlink",
.cbs = {
.modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_onlink_modify,
- .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_onlink_destroy,
}
},
{
@@ -145,7 +144,6 @@ const struct frr_yang_module_info frr_staticd_info = {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/onlink",
.cbs = {
.modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_modify,
- .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_destroy,
}
},
{
diff --git a/staticd/static_nb.h b/staticd/static_nb.h
index b293224dd1..e85e1d0e9f 100644
--- a/staticd/static_nb.h
+++ b/staticd/static_nb.h
@@ -41,8 +41,6 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_pa
struct nb_cb_destroy_args *args);
int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_onlink_modify(
struct nb_cb_modify_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_onlink_destroy(
- struct nb_cb_destroy_args *args);
int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_color_modify(
struct nb_cb_modify_args *args);
int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_color_destroy(
@@ -83,8 +81,6 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_sr
struct nb_cb_destroy_args *args);
int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_modify(
struct nb_cb_modify_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_destroy(
- struct nb_cb_destroy_args *args);
int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_color_modify(
struct nb_cb_modify_args *args);
int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_color_destroy(
diff --git a/staticd/static_nb_config.c b/staticd/static_nb_config.c
index 3778960cd1..bf669957bf 100644
--- a/staticd/static_nb_config.c
+++ b/staticd/static_nb_config.c
@@ -287,9 +287,27 @@ static int static_nexthop_mpls_label_modify(struct nb_cb_modify_args *args)
static int static_nexthop_onlink_modify(struct nb_cb_modify_args *args)
{
struct static_nexthop *nh;
+ static_types nh_type;
- nh = nb_running_get_entry(args->dnode, NULL, true);
- nh->onlink = yang_dnode_get_bool(args->dnode, NULL);
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ nh_type = yang_dnode_get_enum(args->dnode, "../nh-type");
+ if ((nh_type != STATIC_IPV4_GATEWAY_IFNAME)
+ && (nh_type != STATIC_IPV6_GATEWAY_IFNAME)) {
+ snprintf(
+ args->errmsg, args->errmsg_len,
+ "nexthop type is not the ipv4 or ipv6 interface type");
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ nh = nb_running_get_entry(args->dnode, NULL, true);
+ nh->onlink = yang_dnode_get_bool(args->dnode, NULL);
+ break;
+ }
return NB_OK;
}
@@ -317,9 +335,25 @@ static int static_nexthop_color_destroy(struct nb_cb_destroy_args *args)
static int static_nexthop_bh_type_modify(struct nb_cb_modify_args *args)
{
struct static_nexthop *nh;
+ static_types nh_type;
- nh = nb_running_get_entry(args->dnode, NULL, true);
- nh->bh_type = yang_dnode_get_enum(args->dnode, NULL);
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ nh_type = yang_dnode_get_enum(args->dnode, "../nh-type");
+ if (nh_type != STATIC_BLACKHOLE) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "nexthop type is not the blackhole type");
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ nh = nb_running_get_entry(args->dnode, NULL, true);
+ nh->bh_type = yang_dnode_get_enum(args->dnode, NULL);
+ break;
+ }
return NB_OK;
}
@@ -589,7 +623,7 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_pa
info = route_table_get_info(rn->table);
if (static_nexthop_create(args, rn_dnode, info) != NB_OK)
- return NB_ERR_VALIDATION;
+ return NB_ERR_INCONSISTENCY;
break;
}
return NB_OK;
@@ -626,17 +660,7 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_pa
int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_bh_type_modify(
struct nb_cb_modify_args *args)
{
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- break;
- case NB_EV_APPLY:
- if (static_nexthop_bh_type_modify(args) != NB_OK)
- return NB_ERR;
- break;
- }
- return NB_OK;
+ return static_nexthop_bh_type_modify(args);
}
int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_bh_type_destroy(
@@ -662,33 +686,7 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_pa
int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_onlink_modify(
struct nb_cb_modify_args *args)
{
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- break;
- case NB_EV_APPLY:
- if (static_nexthop_onlink_modify(args) != NB_OK)
- return NB_ERR;
-
- break;
- }
- return NB_OK;
-}
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_onlink_destroy(
- struct nb_cb_destroy_args *args)
-{
- /* onlink has a boolean type with default value,
- * so no need to do any operations in destroy callback
- */
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- case NB_EV_APPLY:
- break;
- }
- return NB_OK;
+ return static_nexthop_onlink_modify(args);
}
/*
@@ -1042,17 +1040,7 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_sr
int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_bh_type_modify(
struct nb_cb_modify_args *args)
{
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- break;
- case NB_EV_APPLY:
- if (static_nexthop_bh_type_modify(args) != NB_OK)
- return NB_ERR;
- break;
- }
- return NB_OK;
+ return static_nexthop_bh_type_modify(args);
}
int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_bh_type_destroy(
@@ -1079,35 +1067,7 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_sr
int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_modify(
struct nb_cb_modify_args *args)
{
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- break;
- case NB_EV_APPLY:
- if (static_nexthop_onlink_modify(args) != NB_OK)
- return NB_ERR;
-
- break;
- }
- return NB_OK;
-}
-
-
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_destroy(
- struct nb_cb_destroy_args *args)
-{
- /* onlink has a boolean type with default value,
- * so no need to do any operations in destroy callback
- */
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- case NB_EV_APPLY:
- break;
- }
- return NB_OK;
+ return static_nexthop_onlink_modify(args);
}
/*
diff --git a/tests/.gitignore b/tests/.gitignore
index b1b8f92a87..ca20b0ecac 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -47,6 +47,7 @@
/lib/test_ttable
/lib/test_typelist
/lib/test_versioncmp
+/lib/test_xref
/lib/test_zlog
/lib/test_zmq
/ospf6d/test_lsdb
diff --git a/tests/lib/test_xref.c b/tests/lib/test_xref.c
new file mode 100644
index 0000000000..700950de1f
--- /dev/null
+++ b/tests/lib/test_xref.c
@@ -0,0 +1,140 @@
+/*
+ * xref tests
+ * Copyright (C) 2020 David Lamparter for NetDEF, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+#include "xref.h"
+#include "log.h"
+
+/*
+ * "lib/test_xref.c" (only 1 directory component included)
+ * "logging call"
+ * 0x00000003 (network byte order - LOG_ERR)
+ * 0x00000000 (network byte order - EC / zero here)
+ *
+ * note there are no '\0' terminators included for the strings
+ *
+ * SHA256
+ * => 71a65ce6e81517f642c8f55fb2af6f181f7df54357913b5b577aa61a663fdd4c
+ * & 0f -> 0x01 'H'
+ * & f001 -> 0x07 '7'
+ * & 3e -> 0x13 'K'
+ * & c007 -> 0x12 'J'
+ * & f8 -> 0x0b 'B'
+ * etc.
+ * (for reference: base32ch[] = "0123456789ABCDEFGHJKMNPQRSTVWXYZ")
+ *
+ * (bits are consumed starting with the lowest bit, and the first character
+ * only consumes 4 bits and has the 5th bit at 1)
+ */
+
+static const char *expect_uid = "H7KJB-67TBH";
+static bool test_logcall(void)
+{
+ zlog_err("logging call");
+
+ return true;
+}
+
+static void check_xref(const struct xref *xref, bool *found, bool *error)
+{
+ const char *file = xref->file, *p;
+
+ p = strrchr(file, '/');
+ if (p)
+ file = p + 1;
+
+ if (strcmp(file, "test_xref.c"))
+ return;
+ if (xref->type != XREFT_LOGMSG)
+ return;
+ if (strcmp(xref->func, "test_logcall"))
+ return;
+
+ printf("xref: %s:%d %s() type=%d uid=%s\n",
+ xref->file, xref->line, xref->func, xref->type,
+ xref->xrefdata ? xref->xrefdata->uid : "--");
+
+ if (*found) {
+ printf("duplicate xref!\n");
+ *error = true;
+ }
+
+ const struct xref_logmsg *logmsg;
+
+ logmsg = container_of(xref, struct xref_logmsg, xref);
+ if (strcmp(logmsg->fmtstring, "logging call")) {
+ printf("log message mismatch!\n");
+ *error = true;
+ }
+ if (logmsg->priority != LOG_ERR || logmsg->ec != 0) {
+ printf("metadata mismatch!\n");
+ *error = true;
+ }
+
+ *found = true;
+
+ if (!xref->xrefdata) {
+ printf("no unique ID?\n");
+ *error = true;
+ return;
+ }
+
+ if (strcmp(xref->xrefdata->uid, expect_uid)) {
+ printf("unique ID mismatch, expected %s, got %s\n",
+ expect_uid, xref->xrefdata->uid);
+ *error = true;
+ }
+}
+
+static bool test_lookup(void)
+{
+ struct xref_block *xb;
+ bool found = false, error = false;
+
+ for (xb = xref_blocks; xb; xb = xb->next) {
+ const struct xref * const *xrefp;
+
+ for (xrefp = xb->start; xrefp < xb->stop; xrefp++) {
+ const struct xref *xref = *xrefp;
+
+ if (!xref)
+ continue;
+
+ check_xref(xref, &found, &error);
+ }
+ }
+ return found && !error;
+}
+
+bool (*tests[])(void) = {
+ test_lookup,
+ test_logcall,
+};
+
+XREF_SETUP()
+
+int main(int argc, char **argv)
+{
+ zlog_aux_init("NONE: ", ZLOG_DISABLED);
+
+ for (unsigned int i = 0; i < array_size(tests); i++)
+ if (!tests[i]())
+ return 1;
+ return 0;
+}
diff --git a/tests/lib/test_xref.py b/tests/lib/test_xref.py
new file mode 100644
index 0000000000..8c3db3e182
--- /dev/null
+++ b/tests/lib/test_xref.py
@@ -0,0 +1,6 @@
+import frrtest
+
+class TestXref(frrtest.TestMultiOut):
+ program = './test_xref'
+
+TestXref.exit_cleanly()
diff --git a/tests/ospfd/.gitignore b/tests/ospfd/.gitignore
new file mode 100644
index 0000000000..c659b645db
--- /dev/null
+++ b/tests/ospfd/.gitignore
@@ -0,0 +1,3 @@
+/*_afl/*
+test_ospf_spf
+core
diff --git a/tests/ospfd/common.c b/tests/ospfd/common.c
new file mode 100644
index 0000000000..eb30c4016e
--- /dev/null
+++ b/tests/ospfd/common.c
@@ -0,0 +1,248 @@
+#include <zebra.h>
+
+#include "lib/stream.h"
+#include "lib/vty.h"
+#include "lib/mpls.h"
+#include "lib/if.h"
+#include "lib/table.h"
+
+#include "ospfd/ospfd.h"
+#include "ospfd/ospf_route.h"
+#include "ospfd/ospf_spf.h"
+#include "ospfd/ospf_flood.h"
+#include "ospfd/ospf_lsa.h"
+#include "ospfd/ospf_lsdb.h"
+#include "ospfd/ospf_interface.h"
+#include "ospfd/ospf_sr.h"
+
+#include "common.h"
+
+struct thread_master *master;
+struct zebra_privs_t ospfd_privs;
+
+
+struct ospf_topology *test_find_topology(const char *name)
+{
+ if (strmatch(name, "topo1"))
+ return &topo1;
+ else if (strmatch(name, "topo2"))
+ return &topo2;
+ else if (strmatch(name, "topo3"))
+ return &topo3;
+ else if (strmatch(name, "topo4"))
+ return &topo4;
+ else if (strmatch(name, "topo5"))
+ return &topo5;
+
+ return NULL;
+}
+
+int sort_paths(const void **path1, const void **path2)
+{
+ const struct ospf_path *p1 = *path1;
+ const struct ospf_path *p2 = *path2;
+
+ return (p1->nexthop.s_addr - p2->nexthop.s_addr);
+}
+
+void print_route_table(struct vty *vty, struct route_table *rt)
+{
+ struct route_node *rn;
+ struct ospf_route * or ;
+ struct listnode *pnode;
+ struct ospf_path *path;
+ struct mpls_label_stack *label_stack;
+ char buf[MPLS_LABEL_STRLEN];
+
+ for (rn = route_top(rt); rn; rn = route_next(rn)) {
+ if ((or = rn->info) == NULL)
+ continue;
+
+ vty_out(vty, "N %-18pFX %-15pI4 %d\n", &rn->p,
+ & or->u.std.area_id, or->cost);
+
+ list_sort(or->paths, sort_paths);
+
+ for (ALL_LIST_ELEMENTS_RO(or->paths, pnode, path)) {
+ if (path->nexthop.s_addr == 0)
+ continue;
+
+ vty_out(vty, " -> %pI4 with adv router %pI4",
+ &path->nexthop, &path->adv_router);
+
+ if (path->srni.backup_label_stack) {
+ label_stack = path->srni.backup_label_stack;
+ mpls_label2str(label_stack->num_labels,
+ label_stack->label, buf,
+ MPLS_LABEL_STRLEN, true);
+ vty_out(vty, " and backup path %s", buf);
+ }
+ vty_out(vty, "\n");
+ }
+ }
+}
+
+struct ospf_test_node *test_find_node(struct ospf_topology *topology,
+ const char *hostname)
+{
+ for (int i = 0; topology->nodes[i].hostname[0]; i++)
+ if (strmatch(hostname, topology->nodes[i].hostname))
+ return &topology->nodes[i];
+
+ return NULL;
+}
+
+static void inject_router_lsa(struct vty *vty, struct ospf *ospf,
+ struct ospf_topology *topology,
+ struct ospf_test_node *root,
+ struct ospf_test_node *tnode)
+{
+ struct ospf_area *area;
+ struct in_addr router_id;
+ struct in_addr adj_router_id;
+ struct prefix_ipv4 prefix;
+ struct in_addr data;
+ struct stream *s;
+ struct lsa_header *lsah;
+ struct ospf_lsa *new;
+ int length;
+ unsigned long putp;
+ uint16_t link_count;
+ struct ospf_test_node *tfound_adj_node;
+ struct ospf_test_adj *tadj;
+ bool is_self_lsa = false;
+
+ area = ospf->backbone;
+ inet_aton(tnode->router_id, &router_id);
+
+ if (strncmp(root->router_id, tnode->router_id, 256) == 0)
+ is_self_lsa = true;
+
+ s = stream_new(OSPF_MAX_LSA_SIZE);
+ lsa_header_set(s, LSA_OPTIONS_GET(area) | LSA_OPTIONS_NSSA_GET(area),
+ OSPF_ROUTER_LSA, router_id, router_id);
+
+ stream_putc(s, router_lsa_flags(area));
+ stream_putc(s, 0);
+
+ putp = stream_get_endp(s);
+ stream_putw(s, 0);
+
+ for (link_count = 0; tnode->adjacencies[link_count].hostname[0];
+ link_count++) {
+ tadj = &tnode->adjacencies[link_count];
+ tfound_adj_node = test_find_node(topology, tadj->hostname);
+ str2prefix_ipv4(tnode->adjacencies[link_count].network,
+ &prefix);
+
+ inet_aton(tfound_adj_node->router_id, &adj_router_id);
+ data.s_addr = prefix.prefix.s_addr;
+ link_info_set(&s, adj_router_id, data,
+ LSA_LINK_TYPE_POINTOPOINT, 0, tadj->metric);
+
+ masklen2ip(prefix.prefixlen, &data);
+ link_info_set(&s, prefix.prefix, data, LSA_LINK_TYPE_STUB, 0,
+ tadj->metric);
+ }
+
+ /* Don't forget the node itself (just a stub) */
+ str2prefix_ipv4(tnode->router_id, &prefix);
+ data.s_addr = 0xffffffff;
+ link_info_set(&s, prefix.prefix, data, LSA_LINK_TYPE_STUB, 0, 0);
+
+ /* Take twice the link count (for P2P and stub) plus the local stub */
+ stream_putw_at(s, putp, (2 * link_count) + 1);
+
+ length = stream_get_endp(s);
+ lsah = (struct lsa_header *)STREAM_DATA(s);
+ lsah->length = htons(length);
+
+ new = ospf_lsa_new_and_data(length);
+ new->area = area;
+ new->vrf_id = area->ospf->vrf_id;
+
+ if (is_self_lsa)
+ SET_FLAG(new->flags, OSPF_LSA_SELF | OSPF_LSA_SELF_CHECKED);
+
+ memcpy(new->data, lsah, length);
+ stream_free(s);
+
+ ospf_lsdb_add(area->lsdb, new);
+
+ if (is_self_lsa) {
+ ospf_lsa_unlock(&area->router_lsa_self);
+ area->router_lsa_self = ospf_lsa_lock(new);
+ }
+}
+
+static void inject_sr_db_entry(struct vty *vty, struct ospf_test_node *tnode,
+ struct ospf_topology *topology)
+{
+ struct ospf_test_node *tfound_adj_node;
+ struct ospf_test_adj *tadj;
+ struct in_addr router_id;
+ struct in_addr remote_id;
+ struct sr_node *srn;
+ struct sr_prefix *srp;
+ struct sr_link *srl;
+ int link_count;
+
+ inet_aton(tnode->router_id, &router_id);
+
+ srn = ospf_sr_node_create(&router_id);
+
+ srn->srgb.range_size = 8000;
+ srn->srgb.lower_bound = 16000;
+ srn->msd = 16;
+
+ srn->srlb.range_size = 1000;
+ srn->srlb.lower_bound = 15000;
+
+ /* Prefix SID */
+ srp = XCALLOC(MTYPE_OSPF_SR_PARAMS, sizeof(struct sr_prefix));
+ srp->adv_router = router_id;
+ srp->sid = tnode->label;
+ srp->srn = srn;
+
+ listnode_add(srn->ext_prefix, srp);
+
+ /* Adjacency SIDs for all adjacencies */
+ for (link_count = 0; tnode->adjacencies[link_count].hostname[0];
+ link_count++) {
+ tadj = &tnode->adjacencies[link_count];
+ tfound_adj_node = test_find_node(topology, tadj->hostname);
+
+ srl = XCALLOC(MTYPE_OSPF_SR_PARAMS, sizeof(struct sr_link));
+ srl->adv_router = router_id;
+
+ inet_aton(tfound_adj_node->router_id, &remote_id);
+ srl->remote_id = remote_id;
+
+ srl->type = ADJ_SID;
+ srl->sid[0] = srn->srlb.lower_bound + tadj->label;
+ srl->srn = srn;
+
+ listnode_add(srn->ext_link, srl);
+ }
+}
+
+int topology_load(struct vty *vty, struct ospf_topology *topology,
+ struct ospf_test_node *root, struct ospf *ospf)
+{
+ struct ospf_test_node *tnode;
+
+ for (int i = 0; topology->nodes[i].hostname[0]; i++) {
+ tnode = &topology->nodes[i];
+
+ /* Inject a router LSA for each node, used for SPF */
+ inject_router_lsa(vty, ospf, topology, root, tnode);
+
+ /*
+	 * SR information could also be injected via LSAs, but directly
+ * filling the SR DB with labels is just easier.
+ */
+ inject_sr_db_entry(vty, tnode, topology);
+ }
+
+ return 0;
+}
diff --git a/tests/ospfd/common.h b/tests/ospfd/common.h
new file mode 100644
index 0000000000..6d3e63e359
--- /dev/null
+++ b/tests/ospfd/common.h
@@ -0,0 +1,47 @@
+#ifndef _COMMON_OSPF_H
+#define _COMMON_OSPF_H
+
+#define MAX_ADJACENCIES 8
+#define MAX_NODES 12
+
+struct ospf_test_adj {
+ char hostname[256];
+ char network[256];
+ uint32_t metric;
+ mpls_label_t label;
+};
+
+struct ospf_test_node {
+ char hostname[256];
+ const char *router_id;
+ mpls_label_t label;
+ struct ospf_test_adj adjacencies[MAX_ADJACENCIES + 1];
+};
+
+struct ospf_topology {
+ struct ospf_test_node nodes[MAX_NODES + 1];
+};
+
+/* Prototypes. */
+extern struct ospf_topology *test_find_topology(const char *name);
+extern struct ospf_test_node *test_find_node(struct ospf_topology *topology,
+ const char *hostname);
+extern int topology_load(struct vty *vty, struct ospf_topology *topology,
+ struct ospf_test_node *root, struct ospf *ospf);
+
+/* Global variables. */
+extern struct thread_master *master;
+extern struct ospf_topology topo1;
+extern struct ospf_topology topo2;
+extern struct ospf_topology topo3;
+extern struct ospf_topology topo4;
+extern struct ospf_topology topo5;
+extern struct zebra_privs_t ospfd_privs;
+
+/* For stable order in unit tests */
+extern int sort_paths(const void **path1, const void **path2);
+
+/* Print the routing table */
+extern void print_route_table(struct vty *vty, struct route_table *rt);
+
+#endif /* _COMMON_OSPF_H */
diff --git a/tests/ospfd/test_ospf_spf.c b/tests/ospfd/test_ospf_spf.c
new file mode 100644
index 0000000000..a85f7e14ec
--- /dev/null
+++ b/tests/ospfd/test_ospf_spf.c
@@ -0,0 +1,303 @@
+#include <zebra.h>
+
+#include "getopt.h"
+#include "thread.h"
+#include <lib/version.h>
+#include "vty.h"
+#include "command.h"
+#include "log.h"
+#include "vrf.h"
+#include "table.h"
+#include "mpls.h"
+
+#include "ospfd/ospfd.h"
+#include "ospfd/ospf_asbr.h"
+#include "ospfd/ospf_lsa.h"
+#include "ospfd/ospf_route.h"
+#include "ospfd/ospf_spf.h"
+#include "ospfd/ospf_ti_lfa.h"
+#include "ospfd/ospf_vty.h"
+#include "ospfd/ospf_dump.h"
+#include "ospfd/ospf_sr.h"
+
+#include "common.h"
+
+DECLARE_RBTREE_UNIQ(p_spaces, struct p_space, p_spaces_item,
+ p_spaces_compare_func)
+DECLARE_RBTREE_UNIQ(q_spaces, struct q_space, q_spaces_item,
+ q_spaces_compare_func)
+
+static struct ospf *test_init(struct ospf_test_node *root)
+{
+ struct ospf *ospf;
+ struct ospf_area *area;
+ struct in_addr area_id;
+ struct in_addr router_id;
+
+ ospf = ospf_new_alloc(0, NULL);
+
+ area_id.s_addr = OSPF_AREA_BACKBONE;
+ area = ospf_area_new(ospf, area_id);
+ listnode_add_sort(ospf->areas, area);
+
+ inet_aton(root->router_id, &router_id);
+ ospf->router_id = router_id;
+ ospf->router_id_static = router_id;
+ ospf->ti_lfa_enabled = true;
+
+ return ospf;
+}
+
+static void test_run_spf(struct vty *vty, struct ospf *ospf,
+ enum protection_type protection_type, bool verbose)
+{
+ struct route_table *new_table, *new_rtrs;
+ struct ospf_area *area;
+ struct p_space *p_space;
+ struct q_space *q_space;
+ char label_buf[MPLS_LABEL_STRLEN];
+ char res_buf[PROTECTED_RESOURCE_STRLEN];
+
+ /* Just use the backbone for testing */
+ area = ospf->backbone;
+
+ new_table = route_table_init();
+ new_rtrs = route_table_init();
+
+ /* dryrun true, root_node false */
+ ospf_spf_calculate(area, area->router_lsa_self, new_table, new_rtrs,
+ true, false);
+
+ if (verbose) {
+ vty_out(vty, "SPF Tree without TI-LFA backup paths:\n\n");
+ ospf_spf_print(vty, area->spf, 0);
+
+ vty_out(vty,
+ "\nRouting Table without TI-LFA backup paths:\n\n");
+ print_route_table(vty, new_table);
+ }
+
+ if (verbose)
+ vty_out(vty, "\n... generating TI-LFA backup paths ...\n");
+
+ /* TI-LFA testrun */
+ ospf_ti_lfa_generate_p_spaces(area, protection_type);
+ ospf_ti_lfa_insert_backup_paths(area, new_table);
+
+ /* Print P/Q space information */
+ if (verbose) {
+ vty_out(vty, "\nP and Q space info:\n");
+ frr_each (p_spaces, area->p_spaces, p_space) {
+ ospf_print_protected_resource(
+ p_space->protected_resource, res_buf);
+ vty_out(vty, "\nP Space for root %pI4 and %s\n",
+ &p_space->root->id, res_buf);
+ ospf_spf_print(vty, p_space->root, 0);
+
+ frr_each (q_spaces, p_space->q_spaces, q_space) {
+ vty_out(vty,
+ "\nQ Space for destination %pI4:\n",
+ &q_space->root->id);
+ ospf_spf_print(vty, q_space->root, 0);
+ if (q_space->label_stack) {
+ mpls_label2str(
+ q_space->label_stack
+ ->num_labels,
+ q_space->label_stack->label,
+ label_buf, MPLS_LABEL_STRLEN,
+ true);
+ vty_out(vty, "\nLabel stack: %s\n",
+ label_buf);
+ } else {
+ vty_out(vty,
+ "\nLabel stack not generated!\n");
+ }
+ }
+
+ vty_out(vty, "\nPost-convergence SPF Tree:\n");
+ ospf_spf_print(vty, p_space->pc_spf, 0);
+ }
+ }
+
+ /* Cleanup */
+ ospf_ti_lfa_free_p_spaces(area);
+ ospf_spf_cleanup(area->spf, area->spf_vertex_list);
+
+ /*
+ * Print the new routing table which is augmented with TI-LFA backup
+ * paths (label stacks).
+ */
+ if (verbose)
+ vty_out(vty,
+ "\n\nFinal Routing Table including backup paths:\n\n");
+
+ print_route_table(vty, new_table);
+}
+
+static int test_run(struct vty *vty, struct ospf_topology *topology,
+ struct ospf_test_node *root,
+ enum protection_type protection_type, bool verbose)
+{
+ struct ospf *ospf;
+
+ ospf = test_init(root);
+
+ /* Inject LSAs into the OSPF backbone according to the topology */
+ if (topology_load(vty, topology, root, ospf)) {
+ vty_out(vty, "%% Failed to load topology\n");
+ return CMD_WARNING;
+ }
+
+ if (verbose) {
+ vty_out(vty, "\n");
+ show_ip_ospf_database_summary(vty, ospf, 0, NULL);
+ }
+
+ test_run_spf(vty, ospf, protection_type, verbose);
+
+ return 0;
+}
+
+DEFUN(test_ospf, test_ospf_cmd,
+ "test ospf topology WORD root HOSTNAME ti-lfa [node-protection] [verbose]",
+ "Test mode\n"
+ "Choose OSPF for SPF testing\n"
+ "Network topology to choose\n"
+ "Name of the network topology to choose\n"
+ "Root node to choose\n"
+ "Hostname of the root node to choose\n"
+ "Use Topology-Independent LFA\n"
+ "Use node protection (default is link protection)\n"
+ "Verbose output\n")
+{
+ struct ospf_topology *topology;
+ struct ospf_test_node *root;
+ enum protection_type protection_type = OSPF_TI_LFA_LINK_PROTECTION;
+ int idx = 0;
+ bool verbose = false;
+
+ /* Parse topology. */
+ argv_find(argv, argc, "topology", &idx);
+ topology = test_find_topology(argv[idx + 1]->arg);
+ if (!topology) {
+ vty_out(vty, "%% Topology not found\n");
+ return CMD_WARNING;
+ }
+
+ argv_find(argv, argc, "root", &idx);
+ root = test_find_node(topology, argv[idx + 1]->arg);
+ if (!root) {
+ vty_out(vty, "%% Root not found\n");
+ return CMD_WARNING;
+ }
+
+ if (argv_find(argv, argc, "node-protection", &idx))
+ protection_type = OSPF_TI_LFA_NODE_PROTECTION;
+
+ if (argv_find(argv, argc, "verbose", &idx))
+ verbose = true;
+
+ return test_run(vty, topology, root, protection_type, verbose);
+}
+
+static void vty_do_exit(int isexit)
+{
+ printf("\nend.\n");
+
+ cmd_terminate();
+ vty_terminate();
+ thread_master_free(master);
+
+ if (!isexit)
+ exit(0);
+}
+
+struct option longopts[] = {{"help", no_argument, NULL, 'h'},
+ {"debug", no_argument, NULL, 'd'},
+ {0} };
+
+/* Help information display. */
+static void usage(char *progname, int status)
+{
+ if (status != 0)
+ fprintf(stderr, "Try `%s --help' for more information.\n",
+ progname);
+ else {
+ printf("Usage : %s [OPTION...]\n\
+ospfd SPF test program.\n\n\
+-d, --debug Enable debugging\n\
+-h, --help Display this help and exit\n\
+\n\
+Report bugs to %s\n",
+ progname, FRR_BUG_ADDRESS);
+ }
+ exit(status);
+}
+
+int main(int argc, char **argv)
+{
+ char *p;
+ char *progname;
+ struct thread thread;
+ bool debug = false;
+
+ /* Set umask before anything for security */
+ umask(0027);
+
+ /* get program name */
+ progname = ((p = strrchr(argv[0], '/')) ? ++p : argv[0]);
+
+ while (1) {
+ int opt;
+
+ opt = getopt_long(argc, argv, "hd", longopts, 0);
+
+ if (opt == EOF)
+ break;
+
+ switch (opt) {
+ case 0:
+ break;
+ case 'd':
+ debug = true;
+ break;
+ case 'h':
+ usage(progname, 0);
+ break;
+ default:
+ usage(progname, 1);
+ break;
+ }
+ }
+
+ /* master init. */
+ master = thread_master_create(NULL);
+
+ /* Library inits. */
+ cmd_init(1);
+ cmd_hostname_set("test");
+ vty_init(master, false);
+ if (debug)
+ zlog_aux_init("NONE: ", LOG_DEBUG);
+ else
+ zlog_aux_init("NONE: ", ZLOG_DISABLED);
+
+ /* Install test command. */
+ install_element(VIEW_NODE, &test_ospf_cmd);
+
+ /* needed for SR DB init */
+ ospf_vty_init();
+ ospf_sr_init();
+
+ term_debug_ospf_ti_lfa = 1;
+
+ /* Read input from .in file. */
+ vty_stdio(vty_do_exit);
+
+ /* Fetch next active thread. */
+ while (thread_fetch(master, &thread))
+ thread_call(&thread);
+
+ /* Not reached. */
+ exit(0);
+}
diff --git a/tests/ospfd/test_ospf_spf.in b/tests/ospfd/test_ospf_spf.in
new file mode 100644
index 0000000000..f1e746745f
--- /dev/null
+++ b/tests/ospfd/test_ospf_spf.in
@@ -0,0 +1,10 @@
+test ospf topology topo1 root rt1 ti-lfa
+test ospf topology topo1 root rt1 ti-lfa node-protection
+test ospf topology topo2 root rt1 ti-lfa
+test ospf topology topo2 root rt1 ti-lfa node-protection
+test ospf topology topo3 root rt1 ti-lfa
+test ospf topology topo3 root rt1 ti-lfa node-protection
+test ospf topology topo4 root rt1 ti-lfa
+test ospf topology topo4 root rt1 ti-lfa node-protection
+test ospf topology topo5 root rt1 ti-lfa
+test ospf topology topo5 root rt1 ti-lfa node-protection
diff --git a/tests/ospfd/test_ospf_spf.py b/tests/ospfd/test_ospf_spf.py
new file mode 100644
index 0000000000..92a1c6a145
--- /dev/null
+++ b/tests/ospfd/test_ospf_spf.py
@@ -0,0 +1,4 @@
+import frrtest
+
+class TestOspfSPF(frrtest.TestRefOut):
+ program = './test_ospf_spf'
diff --git a/tests/ospfd/test_ospf_spf.refout b/tests/ospfd/test_ospf_spf.refout
new file mode 100644
index 0000000000..d1e3c7bc65
--- /dev/null
+++ b/tests/ospfd/test_ospf_spf.refout
@@ -0,0 +1,130 @@
+test# test ospf topology topo1 root rt1 ti-lfa
+N 1.1.1.1/32 0.0.0.0 0
+N 2.2.2.2/32 0.0.0.0 10
+ -> 10.0.1.2 with adv router 2.2.2.2 and backup path 15002
+N 3.3.3.3/32 0.0.0.0 10
+ -> 10.0.3.2 with adv router 3.3.3.3 and backup path 15001
+N 10.0.1.0/24 0.0.0.0 10
+N 10.0.2.0/24 0.0.0.0 20
+ -> 10.0.1.2 with adv router 2.2.2.2 and backup path 15002
+ -> 10.0.3.2 with adv router 3.3.3.3 and backup path 15001
+N 10.0.3.0/24 0.0.0.0 10
+test# test ospf topology topo1 root rt1 ti-lfa node-protection
+N 1.1.1.1/32 0.0.0.0 0
+N 2.2.2.2/32 0.0.0.0 10
+ -> 10.0.1.2 with adv router 2.2.2.2
+N 3.3.3.3/32 0.0.0.0 10
+ -> 10.0.3.2 with adv router 3.3.3.3
+N 10.0.1.0/24 0.0.0.0 10
+N 10.0.2.0/24 0.0.0.0 20
+ -> 10.0.1.2 with adv router 2.2.2.2
+ -> 10.0.3.2 with adv router 3.3.3.3
+N 10.0.3.0/24 0.0.0.0 10
+test# test ospf topology topo2 root rt1 ti-lfa
+N 1.1.1.1/32 0.0.0.0 0
+N 2.2.2.2/32 0.0.0.0 10
+ -> 10.0.1.2 with adv router 2.2.2.2 and backup path 15002
+N 3.3.3.3/32 0.0.0.0 20
+ -> 10.0.1.2 with adv router 3.3.3.3 and backup path 15002
+N 10.0.1.0/24 0.0.0.0 10
+N 10.0.2.0/24 0.0.0.0 20
+ -> 10.0.1.2 with adv router 2.2.2.2 and backup path 15002
+N 10.0.3.0/24 0.0.0.0 30
+test# test ospf topology topo2 root rt1 ti-lfa node-protection
+N 1.1.1.1/32 0.0.0.0 0
+N 2.2.2.2/32 0.0.0.0 10
+ -> 10.0.1.2 with adv router 2.2.2.2
+N 3.3.3.3/32 0.0.0.0 20
+ -> 10.0.1.2 with adv router 3.3.3.3 and backup path 15002
+N 10.0.1.0/24 0.0.0.0 10
+N 10.0.2.0/24 0.0.0.0 20
+ -> 10.0.1.2 with adv router 2.2.2.2
+N 10.0.3.0/24 0.0.0.0 30
+test# test ospf topology topo3 root rt1 ti-lfa
+N 1.1.1.1/32 0.0.0.0 0
+N 2.2.2.2/32 0.0.0.0 10
+ -> 10.0.1.2 with adv router 2.2.2.2 and backup path 16030
+N 3.3.3.3/32 0.0.0.0 20
+ -> 10.0.4.2 with adv router 3.3.3.3 and backup path 15001
+N 4.4.4.4/32 0.0.0.0 10
+ -> 10.0.4.2 with adv router 4.4.4.4 and backup path 15001/15004
+N 10.0.1.0/24 0.0.0.0 10
+N 10.0.2.0/24 0.0.0.0 30
+ -> 10.0.1.2 with adv router 2.2.2.2 and backup path 16030
+N 10.0.3.0/24 0.0.0.0 20
+ -> 10.0.4.2 with adv router 4.4.4.4 and backup path 15001/15004
+N 10.0.4.0/24 0.0.0.0 10
+test# test ospf topology topo3 root rt1 ti-lfa node-protection
+N 1.1.1.1/32 0.0.0.0 0
+N 2.2.2.2/32 0.0.0.0 10
+ -> 10.0.1.2 with adv router 2.2.2.2
+N 3.3.3.3/32 0.0.0.0 20
+ -> 10.0.4.2 with adv router 3.3.3.3 and backup path 15001
+N 4.4.4.4/32 0.0.0.0 10
+ -> 10.0.4.2 with adv router 4.4.4.4
+N 10.0.1.0/24 0.0.0.0 10
+N 10.0.2.0/24 0.0.0.0 30
+ -> 10.0.1.2 with adv router 2.2.2.2
+N 10.0.3.0/24 0.0.0.0 20
+ -> 10.0.4.2 with adv router 4.4.4.4
+N 10.0.4.0/24 0.0.0.0 10
+test# test ospf topology topo4 root rt1 ti-lfa
+N 1.1.1.1/32 0.0.0.0 0
+N 2.2.2.2/32 0.0.0.0 10
+ -> 10.0.1.2 with adv router 2.2.2.2 and backup path 16030/15006
+N 3.3.3.3/32 0.0.0.0 20
+ -> 10.0.4.2 with adv router 3.3.3.3 and backup path 15001/15004
+N 4.4.4.4/32 0.0.0.0 10
+ -> 10.0.4.2 with adv router 4.4.4.4 and backup path 15001/15004
+N 10.0.1.0/24 0.0.0.0 10
+N 10.0.2.0/24 0.0.0.0 60
+ -> 10.0.1.2 with adv router 2.2.2.2 and backup path 16030/15006
+N 10.0.3.0/24 0.0.0.0 20
+ -> 10.0.4.2 with adv router 4.4.4.4 and backup path 15001/15004
+N 10.0.4.0/24 0.0.0.0 10
+test# test ospf topology topo4 root rt1 ti-lfa node-protection
+N 1.1.1.1/32 0.0.0.0 0
+N 2.2.2.2/32 0.0.0.0 10
+ -> 10.0.1.2 with adv router 2.2.2.2
+N 3.3.3.3/32 0.0.0.0 20
+ -> 10.0.4.2 with adv router 3.3.3.3 and backup path 15001/15004
+N 4.4.4.4/32 0.0.0.0 10
+ -> 10.0.4.2 with adv router 4.4.4.4
+N 10.0.1.0/24 0.0.0.0 10
+N 10.0.2.0/24 0.0.0.0 60
+ -> 10.0.1.2 with adv router 2.2.2.2
+N 10.0.3.0/24 0.0.0.0 20
+ -> 10.0.4.2 with adv router 4.4.4.4
+N 10.0.4.0/24 0.0.0.0 10
+test# test ospf topology topo5 root rt1 ti-lfa
+N 1.1.1.1/32 0.0.0.0 0
+N 2.2.2.2/32 0.0.0.0 30
+ -> 10.0.4.2 with adv router 2.2.2.2 and backup path 15001
+N 3.3.3.3/32 0.0.0.0 20
+ -> 10.0.4.2 with adv router 3.3.3.3 and backup path 15001/15004
+N 4.4.4.4/32 0.0.0.0 10
+ -> 10.0.4.2 with adv router 4.4.4.4 and backup path 15001/15004/15006
+N 10.0.1.0/24 0.0.0.0 40
+ -> 10.0.4.2 with adv router 2.2.2.2 and backup path 15001
+N 10.0.2.0/24 0.0.0.0 30
+ -> 10.0.4.2 with adv router 3.3.3.3 and backup path 15001/15004
+N 10.0.3.0/24 0.0.0.0 20
+ -> 10.0.4.2 with adv router 4.4.4.4 and backup path 15001/15004/15006
+N 10.0.4.0/24 0.0.0.0 10
+test# test ospf topology topo5 root rt1 ti-lfa node-protection
+N 1.1.1.1/32 0.0.0.0 0
+N 2.2.2.2/32 0.0.0.0 30
+ -> 10.0.4.2 with adv router 2.2.2.2 and backup path 15001
+N 3.3.3.3/32 0.0.0.0 20
+ -> 10.0.4.2 with adv router 3.3.3.3 and backup path 15001/15004
+N 4.4.4.4/32 0.0.0.0 10
+ -> 10.0.4.2 with adv router 4.4.4.4
+N 10.0.1.0/24 0.0.0.0 40
+ -> 10.0.4.2 with adv router 2.2.2.2 and backup path 15001
+N 10.0.2.0/24 0.0.0.0 30
+ -> 10.0.4.2 with adv router 3.3.3.3 and backup path 15001/15004
+N 10.0.3.0/24 0.0.0.0 20
+ -> 10.0.4.2 with adv router 4.4.4.4
+N 10.0.4.0/24 0.0.0.0 10
+test#
+end.
diff --git a/tests/ospfd/topologies.c b/tests/ospfd/topologies.c
new file mode 100644
index 0000000000..2dc611ce96
--- /dev/null
+++ b/tests/ospfd/topologies.c
@@ -0,0 +1,575 @@
+#include <zebra.h>
+
+#include "mpls.h"
+#include "if.h"
+
+#include "ospfd/ospfd.h"
+
+#include "common.h"
+
+/*
+ * +---------+ +---------+
+ * | | | |
+ * | RT1 |eth-rt2 eth-rt1| RT2 |
+ * | 1.1.1.1 +---------------------+ 2.2.2.2 |
+ * | | 10.0.1.0/24 | |
+ * +---------+ +---------+
+ * |eth-rt3 eth-rt3|
+ * | |
+ * |10.0.3.0/24 |
+ * | |
+ * |eth-rt1 |
+ * +---------+ |
+ * | |eth-rt2 10.0.2.0/24|
+ * | RT3 +--------------------------+
+ * | 3.3.3.3 |
+ * | |
+ * +---------+
+ *
+ * Link Protection:
+ * P and Q spaces overlap here, hence just one P/Q node regardless of which
+ * link is protected. Hence the backup label stack just has one label.
+ *
+ * Node Protection:
+ * Obviously no backup paths involved.
+ */
+struct ospf_topology topo1 = {
+ .nodes =
+ {
+ {
+ .hostname = "rt1",
+ .router_id = "1.1.1.1",
+ .label = 10,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt2",
+ .network =
+ "10.0.1.1/24",
+ .metric = 10,
+ .label = 1,
+ },
+ {
+ .hostname = "rt3",
+ .network =
+ "10.0.3.1/24",
+ .metric = 10,
+ .label = 2,
+ },
+ },
+ },
+ {
+ .hostname = "rt2",
+ .router_id = "2.2.2.2",
+ .label = 20,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt1",
+ .network =
+ "10.0.1.2/24",
+ .metric = 10,
+ .label = 3,
+ },
+ {
+ .hostname = "rt3",
+ .network =
+ "10.0.2.1/24",
+ .metric = 10,
+ .label = 4,
+ },
+ },
+ },
+ {
+ .hostname = "rt3",
+ .router_id = "3.3.3.3",
+ .label = 30,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt1",
+ .network =
+ "10.0.3.2/24",
+ .metric = 10,
+ .label = 5,
+ },
+ {
+ .hostname = "rt2",
+ .network =
+ "10.0.2.2/24",
+ .metric = 10,
+ .label = 6,
+ },
+ },
+ },
+ },
+};
+
+
+/*
+ * +---------+ +---------+
+ * | | | |
+ * | RT1 |eth-rt2 eth-rt1| RT2 |
+ * | 1.1.1.1 +---------------------+ 2.2.2.2 |
+ * | | 10.0.1.0/24 (10) | |
+ * +---------+ +---------+
+ * |eth-rt3 eth-rt3|
+ * | |
+ * |10.0.3.0/24 (30) |
+ * | |
+ * |eth-rt1 |
+ * +---------+ |
+ * | |eth-rt2 10.0.2.0/24|(10)
+ * | RT3 +--------------------------+
+ * | 3.3.3.3 |
+ * | |
+ * +---------+
+ *
+ * Link Protection:
+ * Regarding the subnet 10.0.1.0/24, the P space of RT1 is just RT1 itself
+ * while the Q space of RT3 consists of RT3 and RT2. Hence the P and Q
+ * nodes are disjunct (tricky: the root node is the P node here). For the
+ * backup label stack just one label is necessary.
+ *
+ * Node Protection:
+ * For protected node RT2 and route from RT1 to RT3 there is just the backup
+ * path consisting of the label 15002.
+ */
+struct ospf_topology topo2 = {
+ .nodes =
+ {
+ {
+ .hostname = "rt1",
+ .router_id = "1.1.1.1",
+ .label = 10,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt2",
+ .network =
+ "10.0.1.1/24",
+ .metric = 10,
+ .label = 1,
+ },
+ {
+ .hostname = "rt3",
+ .network =
+ "10.0.3.1/24",
+ .metric = 30,
+ .label = 2,
+ },
+ },
+ },
+ {
+ .hostname = "rt2",
+ .router_id = "2.2.2.2",
+ .label = 20,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt1",
+ .network =
+ "10.0.1.2/24",
+ .metric = 10,
+ .label = 3,
+ },
+ {
+ .hostname = "rt3",
+ .network =
+ "10.0.2.1/24",
+ .metric = 10,
+ .label = 4,
+ },
+ },
+ },
+ {
+ .hostname = "rt3",
+ .router_id = "3.3.3.3",
+ .label = 30,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt1",
+ .network =
+ "10.0.3.2/24",
+ .metric = 30,
+ .label = 5,
+ },
+ {
+ .hostname = "rt2",
+ .network =
+ "10.0.2.2/24",
+ .metric = 10,
+ .label = 6,
+ },
+ },
+ },
+ },
+};
+
+/*
+ * +---------+ +---------+
+ * | | | |
+ * | RT1 |eth-rt4 eth-rt1| RT4 |
+ * | 1.1.1.1 +---------------------+ 4.4.4.4 |
+ * | | 10.0.4.0/24 (10) | |
+ * +---------+ +---------+
+ * |eth-rt2 eth-rt3|
+ * | |
+ * |10.0.1.0/24 (10) |
+ * | 10.0.3.0/24 (10) |
+ * |eth-rt1 eth-rt4|
+ * +---------+ +---------+
+ * | |eth-rt3 eth-rt2| |
+ * | RT2 +---------------------+ RT3 |
+ * | 2.2.2.2 | 10.0.2.0/24 (20) | 3.3.3.3 |
+ * | | | |
+ * +---------+ +---------+
+ *
+ * Link Protection:
+ * Regarding the protected subnet 10.0.4.0/24, the P and Q spaces for root RT1
+ * and destination RT4 are disjunct and the P node is RT2 while RT3 is the Q
+ * node. Hence the backup label stack here is 16020/15004. Note that here the
+ * P and Q nodes are neither the root nor the destination nodes, so this is a
+ * case where you really need a label stack consisting of two labels.
+ *
+ * Node Protection:
+ * For the protected node RT4 and the route from RT1 to RT3 there is a backup
+ * path with the single label 15001.
+ */
+struct ospf_topology topo3 = {
+ .nodes =
+ {
+ {
+ .hostname = "rt1",
+ .router_id = "1.1.1.1",
+ .label = 10,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt2",
+ .network =
+ "10.0.1.1/24",
+ .metric = 10,
+ .label = 1,
+ },
+ {
+ .hostname = "rt4",
+ .network =
+ "10.0.4.1/24",
+ .metric = 10,
+ .label = 2,
+ },
+ },
+ },
+ {
+ .hostname = "rt2",
+ .router_id = "2.2.2.2",
+ .label = 20,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt1",
+ .network =
+ "10.0.1.2/24",
+ .metric = 10,
+ .label = 3,
+ },
+ {
+ .hostname = "rt3",
+ .network =
+ "10.0.2.1/24",
+ .metric = 20,
+ .label = 4,
+ },
+ },
+ },
+ {
+ .hostname = "rt3",
+ .router_id = "3.3.3.3",
+ .label = 30,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt2",
+ .network =
+ "10.0.2.2/24",
+ .metric = 20,
+ .label = 5,
+ },
+ {
+ .hostname = "rt4",
+ .network =
+ "10.0.3.1/24",
+ .metric = 10,
+ .label = 6,
+ },
+ },
+ },
+ {
+ .hostname = "rt4",
+ .router_id = "4.4.4.4",
+ .label = 40,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt1",
+ .network =
+ "10.0.4.2/24",
+ .metric = 10,
+ .label = 7,
+ },
+ {
+ .hostname = "rt3",
+ .network =
+ "10.0.3.2/24",
+ .metric = 10,
+ .label = 8,
+ },
+ },
+ },
+ },
+};
+
+/*
+ * +---------+ +---------+
+ * | | | |
+ * | RT1 |eth-rt4 eth-rt1| RT4 |
+ * | 1.1.1.1 +---------------------+ 4.4.4.4 |
+ * | | 10.0.4.0/24 (10) | |
+ * +---------+ +---------+
+ *   |eth-rt2                               eth-rt3|
+ * | |
+ * |10.0.1.0/24 (10) |
+ * | 10.0.3.0/24 (10) |
+ * |eth-rt1 eth-rt4|
+ * +---------+ +---------+
+ * | |eth-rt3 eth-rt2| |
+ * | RT2 +---------------------+ RT3 |
+ * | 2.2.2.2 | 10.0.2.0/24 (40) | 3.3.3.3 |
+ * | | | |
+ * +---------+ +---------+
+ *
+ * This case was specifically created for Node Protection with RT4 as
+ * protected node from the perspective of RT1. Note the weight of 40
+ * on the link between RT2 and RT3.
+ * The P space of RT1 is just RT2 while the Q space of RT3 is empty.
+ * This means that the P and Q spaces are disjunct and there are two
+ * labels needed to get from RT1 to RT3.
+ */
+struct ospf_topology topo4 = {
+ .nodes =
+ {
+ {
+ .hostname = "rt1",
+ .router_id = "1.1.1.1",
+ .label = 10,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt2",
+ .network =
+ "10.0.1.1/24",
+ .metric = 10,
+ .label = 1,
+ },
+ {
+ .hostname = "rt4",
+ .network =
+ "10.0.4.1/24",
+ .metric = 10,
+ .label = 2,
+ },
+ },
+ },
+ {
+ .hostname = "rt2",
+ .router_id = "2.2.2.2",
+ .label = 20,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt1",
+ .network =
+ "10.0.1.2/24",
+ .metric = 10,
+ .label = 3,
+ },
+ {
+ .hostname = "rt3",
+ .network =
+ "10.0.2.1/24",
+ .metric = 50,
+ .label = 4,
+ },
+ },
+ },
+ {
+ .hostname = "rt3",
+ .router_id = "3.3.3.3",
+ .label = 30,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt2",
+ .network =
+ "10.0.2.2/24",
+ .metric = 50,
+ .label = 5,
+ },
+ {
+ .hostname = "rt4",
+ .network =
+ "10.0.3.1/24",
+ .metric = 10,
+ .label = 6,
+ },
+ },
+ },
+ {
+ .hostname = "rt4",
+ .router_id = "4.4.4.4",
+ .label = 40,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt3",
+ .network =
+ "10.0.3.2/24",
+ .metric = 10,
+ .label = 7,
+ },
+ {
+ .hostname = "rt1",
+ .network =
+ "10.0.4.2/24",
+ .metric = 10,
+ .label = 8,
+ },
+ },
+ },
+ },
+};
+
+/*
+ * +---------+ +---------+
+ * | | | |
+ * | RT1 |eth-rt4 eth-rt1| RT4 |
+ * | 1.1.1.1 +---------------------+ 4.4.4.4 |
+ * | | 10.0.4.0/24 | |
+ * +---------+ +---------+
+ *   |eth-rt2                               eth-rt3|
+ * | |
+ * |10.0.1.0/24 |
+ * | 10.0.3.0/24|
+ * |eth-rt1 eth-rt4|
+ * +---------+ +---------+
+ * | |eth-rt3 eth-rt2| |
+ * | RT2 +---------------------+ RT3 |
+ * | 2.2.2.2 | 10.0.2.0/24 | 3.3.3.3 |
+ * | | | |
+ * +---------+ +---------+
+ *
+ * Weights:
+ * - clockwise: 10
+ * - counterclockwise: 40
+ *
+ * This is an example where 3 (!) labels are needed for the protected
+ * link RT1<->RT2, e.g. the subnet 10.0.1.0/24, to reach RT4.
+ *
+ * Because the initial P and Q spaces will not be overlapping or
+ * adjacent for this case the TI-LFA will be applied recursively.
+ */
+struct ospf_topology topo5 = {
+ .nodes =
+ {
+ {
+ .hostname = "rt1",
+ .router_id = "1.1.1.1",
+ .label = 10,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt2",
+ .network =
+ "10.0.1.1/24",
+ .metric = 40,
+ .label = 1,
+ },
+ {
+ .hostname = "rt4",
+ .network =
+ "10.0.4.1/24",
+ .metric = 10,
+ .label = 2,
+ },
+ },
+ },
+ {
+ .hostname = "rt2",
+ .router_id = "2.2.2.2",
+ .label = 20,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt1",
+ .network =
+ "10.0.1.2/24",
+ .metric = 10,
+ .label = 3,
+ },
+ {
+ .hostname = "rt3",
+ .network =
+ "10.0.2.1/24",
+ .metric = 40,
+ .label = 4,
+ },
+ },
+ },
+ {
+ .hostname = "rt3",
+ .router_id = "3.3.3.3",
+ .label = 30,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt2",
+ .network =
+ "10.0.2.2/24",
+ .metric = 10,
+ .label = 5,
+ },
+ {
+ .hostname = "rt4",
+ .network =
+ "10.0.3.1/24",
+ .metric = 40,
+ .label = 6,
+ },
+ },
+ },
+ {
+ .hostname = "rt4",
+ .router_id = "4.4.4.4",
+ .label = 40,
+ .adjacencies =
+ {
+ {
+ .hostname = "rt3",
+ .network =
+ "10.0.3.2/24",
+ .metric = 10,
+ .label = 7,
+ },
+ {
+ .hostname = "rt1",
+ .network =
+ "10.0.4.2/24",
+ .metric = 40,
+ .label = 8,
+ },
+ },
+ },
+ },
+};
diff --git a/tests/subdir.am b/tests/subdir.am
index 1f173d7f1a..370e6a49a9 100644
--- a/tests/subdir.am
+++ b/tests/subdir.am
@@ -31,6 +31,14 @@ TESTS_ISISD =
IGNORE_ISISD = --ignore=isisd/
endif
+if OSPFD
+TESTS_OSPFD = \
+ tests/ospfd/test_ospf_spf \
+ # end
+else
+TESTS_OSPFD =
+endif
+
if OSPF6D
TESTS_OSPF6D = \
tests/ospf6d/test_lsdb \
@@ -83,6 +91,7 @@ check_PROGRAMS = \
tests/lib/test_ttable \
tests/lib/test_typelist \
tests/lib/test_versioncmp \
+ tests/lib/test_xref \
tests/lib/test_zlog \
tests/lib/test_graph \
tests/lib/cli/test_cli \
@@ -90,6 +99,7 @@ check_PROGRAMS = \
tests/lib/northbound/test_oper_data \
$(TESTS_BGPD) \
$(TESTS_ISISD) \
+ $(TESTS_OSPFD) \
$(TESTS_OSPF6D) \
$(TESTS_ZEBRA) \
# end
@@ -126,6 +136,7 @@ noinst_HEADERS += \
tests/lib/cli/common_cli.h \
tests/lib/test_typelist.h \
tests/isisd/test_common.h \
+ tests/ospfd/common.h \
# end
#
@@ -145,6 +156,7 @@ TESTS_CFLAGS = \
ALL_TESTS_LDADD = lib/libfrr.la $(LIBCAP)
BGP_TEST_LDADD = bgpd/libbgp.a $(RFPLDADD) $(ALL_TESTS_LDADD) -lm
ISISD_TEST_LDADD = isisd/libisis.a $(ALL_TESTS_LDADD)
+OSPFD_TEST_LDADD = ospfd/libfrrospf.a $(ALL_TESTS_LDADD)
OSPF6_TEST_LDADD = ospf6d/libospf6.a $(ALL_TESTS_LDADD)
ZEBRA_TEST_LDADD = zebra/label_manager.o $(ALL_TESTS_LDADD)
@@ -213,6 +225,11 @@ tests_isisd_test_isis_vertex_queue_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_isisd_test_isis_vertex_queue_LDADD = $(ISISD_TEST_LDADD)
tests_isisd_test_isis_vertex_queue_SOURCES = tests/isisd/test_isis_vertex_queue.c tests/isisd/test_common.c
+tests_ospfd_test_ospf_spf_CFLAGS = $(TESTS_CFLAGS)
+tests_ospfd_test_ospf_spf_CPPFLAGS = $(TESTS_CPPFLAGS)
+tests_ospfd_test_ospf_spf_LDADD = $(OSPFD_TEST_LDADD)
+tests_ospfd_test_ospf_spf_SOURCES = tests/ospfd/test_ospf_spf.c tests/ospfd/common.c tests/ospfd/topologies.c
+
tests_lib_cxxcompat_CFLAGS = $(TESTS_CFLAGS) $(CXX_COMPAT_CFLAGS) $(WERROR)
tests_lib_cxxcompat_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_cxxcompat_SOURCES = tests/lib/cxxcompat.c
@@ -334,6 +351,10 @@ tests_lib_test_versioncmp_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_versioncmp_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_test_versioncmp_LDADD = $(ALL_TESTS_LDADD)
tests_lib_test_versioncmp_SOURCES = tests/lib/test_versioncmp.c
+tests_lib_test_xref_CFLAGS = $(TESTS_CFLAGS)
+tests_lib_test_xref_CPPFLAGS = $(TESTS_CPPFLAGS)
+tests_lib_test_xref_LDADD = $(ALL_TESTS_LDADD)
+tests_lib_test_xref_SOURCES = tests/lib/test_xref.c
tests_lib_test_zlog_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_zlog_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_test_zlog_LDADD = $(ALL_TESTS_LDADD)
@@ -370,6 +391,9 @@ EXTRA_DIST += \
tests/isisd/test_isis_spf.in \
tests/isisd/test_isis_spf.refout \
tests/isisd/test_isis_vertex_queue.py \
+ tests/ospfd/test_ospf_spf.py \
+ tests/ospfd/test_ospf_spf.in \
+ tests/ospfd/test_ospf_spf.refout \
tests/lib/cli/test_commands.in \
tests/lib/cli/test_commands.py \
tests/lib/cli/test_commands.refout \
@@ -394,6 +418,7 @@ EXTRA_DIST += \
tests/lib/test_ttable.refout \
tests/lib/test_typelist.py \
tests/lib/test_versioncmp.py \
+ tests/lib/test_xref.py \
tests/lib/test_zlog.py \
tests/lib/test_graph.py \
tests/lib/test_graph.refout \
diff --git a/tests/topotests/all-protocol-startup/r1/ip_nht.ref b/tests/topotests/all-protocol-startup/r1/ip_nht.ref
index 098e3bf387..1da4da4df5 100644
--- a/tests/topotests/all-protocol-startup/r1/ip_nht.ref
+++ b/tests/topotests/all-protocol-startup/r1/ip_nht.ref
@@ -39,6 +39,18 @@
4.4.4.2
unresolved
Client list: pbr(fd XX)
+6.6.6.1
+ unresolved
+ Client list: pbr(fd XX)
+6.6.6.2
+ unresolved
+ Client list: pbr(fd XX)
+6.6.6.3
+ unresolved
+ Client list: pbr(fd XX)
+6.6.6.4
+ unresolved
+ Client list: pbr(fd XX)
192.168.0.2
resolved via connected
is directly connected, r1-eth0
diff --git a/tests/topotests/all-protocol-startup/test_all_protocol_startup.py b/tests/topotests/all-protocol-startup/test_all_protocol_startup.py
index 24bef07ec2..5942aca71d 100644
--- a/tests/topotests/all-protocol-startup/test_all_protocol_startup.py
+++ b/tests/topotests/all-protocol-startup/test_all_protocol_startup.py
@@ -82,6 +82,7 @@ class NetworkTopo(Topo):
##
#####################################################
+
@pytest.mark.isis
@pytest.mark.ospf
@pytest.mark.rip
@@ -344,7 +345,7 @@ def test_converge_protocols():
actual = (
net["r%s" % i]
.cmd(
- 'vtysh -c "show ip route" | /usr/bin/tail -n +7 | env LC_ALL=en_US.UTF-8 sort 2> /dev/null'
+ 'vtysh -c "show ip route" | sed -e \'/^Codes: /,/^\s*$/d\' | env LC_ALL=en_US.UTF-8 sort 2> /dev/null'
)
.rstrip()
)
@@ -375,7 +376,7 @@ def test_converge_protocols():
actual = (
net["r%s" % i]
.cmd(
- 'vtysh -c "show ipv6 route" | /usr/bin/tail -n +7 | env LC_ALL=en_US.UTF-8 sort 2> /dev/null'
+ 'vtysh -c "show ipv6 route" | sed -e \'/^Codes: /,/^\s*$/d\' | env LC_ALL=en_US.UTF-8 sort 2> /dev/null'
)
.rstrip()
)
@@ -537,6 +538,51 @@ def test_nexthop_groups():
verify_route_nexthop_group("5.5.5.1/32")
+ ## 4-way ECMP Routes Pointing to Each Other
+
+ # This is to check for a bug with NH resolution where
+    # routes would infinitely resolve to each other blowing
+ # up the resolved-> nexthop pointer.
+
+ net["r1"].cmd(
+ 'vtysh -c "c t" -c "nexthop-group infinite-recursive" -c "nexthop 6.6.6.1" -c "nexthop 6.6.6.2" \
+ -c "nexthop 6.6.6.3" -c "nexthop 6.6.6.4"'
+ )
+
+ # static route nexthops can recurse to
+
+ net["r1"].cmd('vtysh -c "c t" -c "ip route 6.6.6.0/24 1.1.1.1"')
+
+ # Make routes that point to themselves in ecmp
+
+ net["r1"].cmd(
+ 'vtysh -c "sharp install routes 6.6.6.4 nexthop-group infinite-recursive 1"'
+ )
+
+ net["r1"].cmd(
+ 'vtysh -c "sharp install routes 6.6.6.3 nexthop-group infinite-recursive 1"'
+ )
+
+ net["r1"].cmd(
+ 'vtysh -c "sharp install routes 6.6.6.2 nexthop-group infinite-recursive 1"'
+ )
+
+ net["r1"].cmd(
+ 'vtysh -c "sharp install routes 6.6.6.1 nexthop-group infinite-recursive 1"'
+ )
+
+ # Get routes and test if has too many (duplicate) nexthops
+ nhg_id = route_get_nhg_id("6.6.6.1/32")
+ output = net["r1"].cmd('vtysh -c "show nexthop-group rib %d"' % nhg_id)
+
+ dups = re.findall(r"(via 1\.1\.1\.1)", output)
+
+ # Should find 3, itself is inactive
+ assert len(dups) == 3, (
+ "Route 6.6.6.1/32 with Nexthop Group ID=%d has wrong number of resolved nexthops"
+ % nhg_id
+ )
+
##CLI(net)
## Remove all NHG routes
@@ -548,6 +594,8 @@ def test_nexthop_groups():
net["r1"].cmd('vtysh -c "sharp remove routes 4.4.4.1 1"')
net["r1"].cmd('vtysh -c "sharp remove routes 4.4.4.2 1"')
net["r1"].cmd('vtysh -c "sharp remove routes 5.5.5.1 1"')
+ net["r1"].cmd('vtysh -c "sharp remove routes 6.6.6.1 4"')
+ net["r1"].cmd('vtysh -c "c t" -c "no ip route 6.6.6.0/24 1.1.1.1"')
def test_rip_status():
diff --git a/tests/topotests/bfd-ospf-topo1/__init__.py b/tests/topotests/bfd-ospf-topo1/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/__init__.py
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/bfdd.conf b/tests/topotests/bfd-ospf-topo1/rt1/bfdd.conf
new file mode 100644
index 0000000000..610a20f88a
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt1/bfdd.conf
@@ -0,0 +1,9 @@
+log file bfdd.log
+log timestamp precision 3
+!
+debug bfd network
+debug bfd peer
+debug bfd zebra
+!
+bfd
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/ospf6d.conf b/tests/topotests/bfd-ospf-topo1/rt1/ospf6d.conf
new file mode 100644
index 0000000000..18def599b4
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt1/ospf6d.conf
@@ -0,0 +1,21 @@
+log file ospf6d.log
+log timestamp precision 3
+!
+hostname rt1
+!
+password 1
+!
+interface eth-rt2
+ ipv6 ospf6 network broadcast
+ ipv6 ospf6 bfd
+!
+interface eth-rt3
+ ipv6 ospf6 network broadcast
+ ipv6 ospf6 bfd
+!
+router ospf6
+ ospf6 router-id 1.1.1.1
+ interface eth-rt2 area 0.0.0.0
+ interface eth-rt3 area 0.0.0.0
+ redistribute connected
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/ospfd.conf b/tests/topotests/bfd-ospf-topo1/rt1/ospfd.conf
new file mode 100644
index 0000000000..07b42f9885
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt1/ospfd.conf
@@ -0,0 +1,26 @@
+log file ospfd.log
+log timestamp precision 3
+!
+hostname rt1
+!
+password 1
+!
+debug ospf event
+debug ospf zebra
+!
+interface lo
+ ip ospf area 0.0.0.0
+!
+interface eth-rt2
+ ip ospf area 0.0.0.0
+ ip ospf bfd
+!
+interface eth-rt3
+ ip ospf area 0.0.0.0
+ ip ospf bfd
+!
+router ospf
+ ospf router-id 1.1.1.1
+ passive interface lo
+ router-info area 0.0.0.0
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step1/show_ip_route.ref b/tests/topotests/bfd-ospf-topo1/rt1/step1/show_ip_route.ref
new file mode 100644
index 0000000000..f354eff697
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt1/step1/show_ip_route.ref
@@ -0,0 +1,74 @@
+{
+ "2.2.2.2\/32":[
+ {
+ "prefix":"2.2.2.2\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "3.3.3.3\/32":[
+ {
+ "prefix":"3.3.3.3\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.2.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "4.4.4.4\/32":[
+ {
+ "prefix":"4.4.4.4\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.2.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "5.5.5.5\/32":[
+ {
+ "prefix":"5.5.5.5\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step1/show_ipv6_route.ref b/tests/topotests/bfd-ospf-topo1/rt1/step1/show_ipv6_route.ref
new file mode 100644
index 0000000000..6465efb8b5
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt1/step1/show_ipv6_route.ref
@@ -0,0 +1,70 @@
+{
+ "::ffff:202:202\/128":[
+ {
+ "prefix":"::ffff:202:202\/128",
+ "protocol":"ospf6",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "::ffff:303:303\/128":[
+ {
+ "prefix":"::ffff:303:303\/128",
+ "protocol":"ospf6",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "::ffff:404:404\/128":[
+ {
+ "prefix":"::ffff:404:404\/128",
+ "protocol":"ospf6",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "::ffff:505:505\/128":[
+ {
+ "prefix":"::ffff:505:505\/128",
+ "protocol":"ospf6",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step2/show_bfd_peers.ref b/tests/topotests/bfd-ospf-topo1/rt1/step2/show_bfd_peers.ref
new file mode 100644
index 0000000000..63f0d50784
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt1/step2/show_bfd_peers.ref
@@ -0,0 +1,26 @@
+[
+ {
+ "interface": "eth-rt3",
+ "status": "up",
+ "diagnostic": "ok",
+ "remote-diagnostic": "ok"
+ },
+ {
+ "interface": "eth-rt2",
+ "status": "up",
+ "diagnostic": "ok",
+ "remote-diagnostic": "ok"
+ },
+ {
+ "interface": "eth-rt3",
+ "status": "up",
+ "diagnostic": "ok",
+ "remote-diagnostic": "ok"
+ },
+ {
+ "interface": "eth-rt2",
+ "status": "up",
+ "diagnostic": "ok",
+ "remote-diagnostic": "ok"
+ }
+]
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_healthy.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_healthy.ref
new file mode 100644
index 0000000000..42051f9582
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_healthy.ref
@@ -0,0 +1,28 @@
+[
+ {
+ "peer": "10.0.2.2",
+ "interface": "eth-rt3",
+ "status": "up",
+ "diagnostic": "ok",
+ "remote-diagnostic": "ok"
+ },
+ {
+ "peer": "10.0.1.2",
+ "interface": "eth-rt2",
+ "status": "up",
+ "diagnostic": "ok",
+ "remote-diagnostic": "ok"
+ },
+ {
+ "interface": "eth-rt3",
+ "status": "up",
+ "diagnostic": "ok",
+ "remote-diagnostic": "ok"
+ },
+ {
+ "interface": "eth-rt2",
+ "status": "up",
+ "diagnostic": "ok",
+ "remote-diagnostic": "ok"
+ }
+]
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_rt2_down.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_rt2_down.ref
new file mode 100644
index 0000000000..d844ee6813
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_rt2_down.ref
@@ -0,0 +1,15 @@
+[
+ {
+ "peer": "10.0.2.2",
+ "interface": "eth-rt3",
+ "status": "up",
+ "diagnostic": "ok",
+ "remote-diagnostic": "ok"
+ },
+ {
+ "interface": "eth-rt3",
+ "status": "up",
+ "diagnostic": "ok",
+ "remote-diagnostic": "ok"
+ }
+]
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_rt3_down.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_rt3_down.ref
new file mode 100644
index 0000000000..32799084fb
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_rt3_down.ref
@@ -0,0 +1,15 @@
+[
+ {
+ "peer": "10.0.1.2",
+ "interface": "eth-rt2",
+ "status": "up",
+ "diagnostic": "ok",
+ "remote-diagnostic": "ok"
+ },
+ {
+ "interface": "eth-rt2",
+ "status": "up",
+ "diagnostic": "ok",
+ "remote-diagnostic": "ok"
+ }
+]
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_healthy.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_healthy.ref
new file mode 100644
index 0000000000..f354eff697
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_healthy.ref
@@ -0,0 +1,74 @@
+{
+ "2.2.2.2\/32":[
+ {
+ "prefix":"2.2.2.2\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "3.3.3.3\/32":[
+ {
+ "prefix":"3.3.3.3\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.2.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "4.4.4.4\/32":[
+ {
+ "prefix":"4.4.4.4\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.2.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "5.5.5.5\/32":[
+ {
+ "prefix":"5.5.5.5\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_rt2_down.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_rt2_down.ref
new file mode 100644
index 0000000000..43eecd0b7a
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_rt2_down.ref
@@ -0,0 +1,74 @@
+{
+ "2.2.2.2\/32":[
+ {
+ "prefix":"2.2.2.2\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.2.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "3.3.3.3\/32":[
+ {
+ "prefix":"3.3.3.3\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.2.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "4.4.4.4\/32":[
+ {
+ "prefix":"4.4.4.4\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.2.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "5.5.5.5\/32":[
+ {
+ "prefix":"5.5.5.5\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.2.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_rt3_down.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_rt3_down.ref
new file mode 100644
index 0000000000..409af6308b
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_rt3_down.ref
@@ -0,0 +1,74 @@
+{
+ "2.2.2.2\/32":[
+ {
+ "prefix":"2.2.2.2\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "3.3.3.3\/32":[
+ {
+ "prefix":"3.3.3.3\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "4.4.4.4\/32":[
+ {
+ "prefix":"4.4.4.4\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "5.5.5.5\/32":[
+ {
+ "prefix":"5.5.5.5\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_healthy.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_healthy.ref
new file mode 100644
index 0000000000..6465efb8b5
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_healthy.ref
@@ -0,0 +1,70 @@
+{
+ "::ffff:202:202\/128":[
+ {
+ "prefix":"::ffff:202:202\/128",
+ "protocol":"ospf6",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "::ffff:303:303\/128":[
+ {
+ "prefix":"::ffff:303:303\/128",
+ "protocol":"ospf6",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "::ffff:404:404\/128":[
+ {
+ "prefix":"::ffff:404:404\/128",
+ "protocol":"ospf6",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "::ffff:505:505\/128":[
+ {
+ "prefix":"::ffff:505:505\/128",
+ "protocol":"ospf6",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_rt2_down.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_rt2_down.ref
new file mode 100644
index 0000000000..cfb1ef1bb6
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_rt2_down.ref
@@ -0,0 +1,70 @@
+{
+ "::ffff:202:202\/128":[
+ {
+ "prefix":"::ffff:202:202\/128",
+ "protocol":"ospf6",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "::ffff:303:303\/128":[
+ {
+ "prefix":"::ffff:303:303\/128",
+ "protocol":"ospf6",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "::ffff:404:404\/128":[
+ {
+ "prefix":"::ffff:404:404\/128",
+ "protocol":"ospf6",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "::ffff:505:505\/128":[
+ {
+ "prefix":"::ffff:505:505\/128",
+ "protocol":"ospf6",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_rt3_down.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_rt3_down.ref
new file mode 100644
index 0000000000..58b44da5c2
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_rt3_down.ref
@@ -0,0 +1,70 @@
+{
+ "::ffff:202:202\/128":[
+ {
+ "prefix":"::ffff:202:202\/128",
+ "protocol":"ospf6",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "::ffff:303:303\/128":[
+ {
+ "prefix":"::ffff:303:303\/128",
+ "protocol":"ospf6",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "::ffff:404:404\/128":[
+ {
+ "prefix":"::ffff:404:404\/128",
+ "protocol":"ospf6",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "::ffff:505:505\/128":[
+ {
+ "prefix":"::ffff:505:505\/128",
+ "protocol":"ospf6",
+ "selected":true,
+ "destSelected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/zebra.conf b/tests/topotests/bfd-ospf-topo1/rt1/zebra.conf
new file mode 100644
index 0000000000..6003125b6b
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt1/zebra.conf
@@ -0,0 +1,25 @@
+log file zebra.log
+log timestamp precision 3
+!
+hostname rt1
+!
+debug zebra kernel
+debug zebra packet
+debug zebra events
+debug zebra rib
+!
+interface lo
+ ip address 1.1.1.1/32
+ ipv6 address ::ffff:0101:0101/128
+!
+interface eth-rt2
+ ip address 10.0.1.1/24
+!
+interface eth-rt3
+ ip address 10.0.2.1/24
+!
+ip forwarding
+ipv6 forwarding
+!
+line vty
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt2/bfdd.conf b/tests/topotests/bfd-ospf-topo1/rt2/bfdd.conf
new file mode 100644
index 0000000000..437f063d8f
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt2/bfdd.conf
@@ -0,0 +1,7 @@
+!
+debug bfd network
+debug bfd peer
+debug bfd zebra
+!
+bfd
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt2/ospf6d.conf b/tests/topotests/bfd-ospf-topo1/rt2/ospf6d.conf
new file mode 100644
index 0000000000..2f35099564
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt2/ospf6d.conf
@@ -0,0 +1,19 @@
+log file ospf6d.log
+!
+hostname rt2
+!
+password 1
+!
+interface eth-rt1
+ ipv6 ospf6 network broadcast
+ ipv6 ospf6 bfd
+!
+interface eth-rt5
+ ipv6 ospf6 network broadcast
+!
+router ospf6
+ ospf6 router-id 2.2.2.2
+ interface eth-rt1 area 0.0.0.0
+ interface eth-rt5 area 0.0.0.0
+ redistribute connected
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt2/ospfd.conf b/tests/topotests/bfd-ospf-topo1/rt2/ospfd.conf
new file mode 100644
index 0000000000..a05d8b58c8
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt2/ospfd.conf
@@ -0,0 +1,24 @@
+log file ospfd.log
+!
+hostname rt2
+!
+password 1
+!
+debug ospf event
+debug ospf zebra
+!
+interface lo
+ ip ospf area 0.0.0.0
+!
+interface eth-rt1
+ ip ospf area 0.0.0.0
+ ip ospf bfd
+!
+interface eth-rt5
+ ip ospf area 0.0.0.0
+!
+router ospf
+ ospf router-id 2.2.2.2
+ passive interface lo
+ router-info area 0.0.0.0
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt2/step2/show_bfd_peers.ref b/tests/topotests/bfd-ospf-topo1/rt2/step2/show_bfd_peers.ref
new file mode 100644
index 0000000000..d6df1ebfb2
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt2/step2/show_bfd_peers.ref
@@ -0,0 +1,14 @@
+[
+ {
+ "interface": "eth-rt1",
+ "status": "up",
+ "diagnostic": "ok",
+ "remote-diagnostic": "ok"
+ },
+ {
+ "interface": "eth-rt1",
+ "status": "up",
+ "diagnostic": "ok",
+ "remote-diagnostic": "ok"
+ }
+]
diff --git a/tests/topotests/bfd-ospf-topo1/rt2/zebra.conf b/tests/topotests/bfd-ospf-topo1/rt2/zebra.conf
new file mode 100644
index 0000000000..5fc7fc5b28
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt2/zebra.conf
@@ -0,0 +1,22 @@
+log file zebra.log
+!
+hostname rt2
+!
+debug zebra kernel
+debug zebra packet
+!
+interface lo
+ ip address 2.2.2.2/32
+ ipv6 address ::ffff:0202:0202/128
+!
+interface eth-rt1
+ ip address 10.0.1.2/24
+!
+interface eth-rt5
+ ip address 10.0.3.1/24
+!
+ip forwarding
+ipv6 forwarding
+!
+line vty
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt3/bfdd.conf b/tests/topotests/bfd-ospf-topo1/rt3/bfdd.conf
new file mode 100644
index 0000000000..437f063d8f
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt3/bfdd.conf
@@ -0,0 +1,7 @@
+!
+debug bfd network
+debug bfd peer
+debug bfd zebra
+!
+bfd
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt3/ospf6d.conf b/tests/topotests/bfd-ospf-topo1/rt3/ospf6d.conf
new file mode 100644
index 0000000000..3e8777019e
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt3/ospf6d.conf
@@ -0,0 +1,19 @@
+log file ospf6d.log
+!
+hostname rt3
+!
+password 1
+!
+interface eth-rt1
+ ipv6 ospf6 network broadcast
+ ipv6 ospf6 bfd
+!
+interface eth-rt4
+ ipv6 ospf6 network broadcast
+!
+router ospf6
+ ospf6 router-id 3.3.3.3
+ interface eth-rt1 area 0.0.0.0
+ interface eth-rt4 area 0.0.0.0
+ redistribute connected
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt3/ospfd.conf b/tests/topotests/bfd-ospf-topo1/rt3/ospfd.conf
new file mode 100644
index 0000000000..1196e6d189
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt3/ospfd.conf
@@ -0,0 +1,24 @@
+log file ospfd.log
+!
+hostname rt3
+!
+password 1
+!
+debug ospf event
+debug ospf zebra
+!
+interface lo
+ ip ospf area 0.0.0.0
+!
+interface eth-rt1
+ ip ospf area 0.0.0.0
+ ip ospf bfd
+!
+interface eth-rt4
+ ip ospf area 0.0.0.0
+!
+router ospf
+ ospf router-id 3.3.3.3
+ passive interface lo
+ router-info area 0.0.0.0
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt3/step2/show_bfd_peers.ref b/tests/topotests/bfd-ospf-topo1/rt3/step2/show_bfd_peers.ref
new file mode 100644
index 0000000000..d6df1ebfb2
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt3/step2/show_bfd_peers.ref
@@ -0,0 +1,14 @@
+[
+ {
+ "interface": "eth-rt1",
+ "status": "up",
+ "diagnostic": "ok",
+ "remote-diagnostic": "ok"
+ },
+ {
+ "interface": "eth-rt1",
+ "status": "up",
+ "diagnostic": "ok",
+ "remote-diagnostic": "ok"
+ }
+]
diff --git a/tests/topotests/bfd-ospf-topo1/rt3/zebra.conf b/tests/topotests/bfd-ospf-topo1/rt3/zebra.conf
new file mode 100644
index 0000000000..d368de9bbe
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt3/zebra.conf
@@ -0,0 +1,22 @@
+log file zebra.log
+!
+hostname rt3
+!
+debug zebra kernel
+debug zebra packet
+!
+interface lo
+ ip address 3.3.3.3/32
+ ipv6 address ::ffff:0303:0303/128
+!
+interface eth-rt1
+ ip address 10.0.2.2/24
+!
+interface eth-rt4
+ ip address 10.0.4.1/24
+!
+ip forwarding
+ipv6 forwarding
+!
+line vty
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt4/bfdd.conf b/tests/topotests/bfd-ospf-topo1/rt4/bfdd.conf
new file mode 100644
index 0000000000..f35e772790
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt4/bfdd.conf
@@ -0,0 +1,5 @@
+!
+debug bfd network
+debug bfd peer
+debug bfd zebra
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt4/ospf6d.conf b/tests/topotests/bfd-ospf-topo1/rt4/ospf6d.conf
new file mode 100644
index 0000000000..bccd1e75bd
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt4/ospf6d.conf
@@ -0,0 +1,18 @@
+log file ospf6d.log
+!
+hostname rt4
+!
+password 1
+!
+interface eth-rt3
+ ipv6 ospf6 network broadcast
+!
+interface eth-rt5
+ ipv6 ospf6 network broadcast
+!
+router ospf6
+ ospf6 router-id 4.4.4.4
+ interface eth-rt3 area 0.0.0.0
+ interface eth-rt5 area 0.0.0.0
+ redistribute connected
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt4/ospfd.conf b/tests/topotests/bfd-ospf-topo1/rt4/ospfd.conf
new file mode 100644
index 0000000000..3a2568b4ab
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt4/ospfd.conf
@@ -0,0 +1,23 @@
+log file ospfd.log
+!
+hostname rt4
+!
+password 1
+!
+debug ospf event
+debug ospf zebra
+!
+interface lo
+ ip ospf area 0.0.0.0
+!
+interface eth-rt3
+ ip ospf area 0.0.0.0
+!
+interface eth-rt5
+ ip ospf area 0.0.0.0
+!
+router ospf
+ ospf router-id 4.4.4.4
+ passive interface lo
+ router-info area 0.0.0.0
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt4/zebra.conf b/tests/topotests/bfd-ospf-topo1/rt4/zebra.conf
new file mode 100644
index 0000000000..7b053bac35
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt4/zebra.conf
@@ -0,0 +1,22 @@
+log file zebra.log
+!
+hostname rt4
+!
+debug zebra kernel
+debug zebra packet
+!
+interface lo
+ ip address 4.4.4.4/32
+ ipv6 address ::ffff:0404:0404/128
+!
+interface eth-rt3
+ ip address 10.0.4.2/24
+!
+interface eth-rt5
+ ip address 10.0.5.1/24
+!
+ip forwarding
+ipv6 forwarding
+!
+line vty
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt5/bfdd.conf b/tests/topotests/bfd-ospf-topo1/rt5/bfdd.conf
new file mode 100644
index 0000000000..f35e772790
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt5/bfdd.conf
@@ -0,0 +1,5 @@
+!
+debug bfd network
+debug bfd peer
+debug bfd zebra
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt5/ospf6d.conf b/tests/topotests/bfd-ospf-topo1/rt5/ospf6d.conf
new file mode 100644
index 0000000000..766862276c
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt5/ospf6d.conf
@@ -0,0 +1,18 @@
+log file ospf6d.log
+!
+hostname rt5
+!
+password 1
+!
+interface eth-rt2
+ ipv6 ospf6 network broadcast
+!
+interface eth-rt4
+ ipv6 ospf6 network broadcast
+!
+router ospf6
+ ospf6 router-id 5.5.5.5
+ interface eth-rt2 area 0.0.0.0
+ interface eth-rt4 area 0.0.0.0
+ redistribute connected
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt5/ospfd.conf b/tests/topotests/bfd-ospf-topo1/rt5/ospfd.conf
new file mode 100644
index 0000000000..a35de5f45f
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt5/ospfd.conf
@@ -0,0 +1,23 @@
+log file ospfd.log
+!
+hostname rt5
+!
+password 1
+!
+debug ospf event
+debug ospf zebra
+!
+interface lo
+ ip ospf area 0.0.0.0
+!
+interface eth-rt2
+ ip ospf area 0.0.0.0
+!
+interface eth-rt4
+ ip ospf area 0.0.0.0
+!
+router ospf
+ ospf router-id 5.5.5.5
+ passive interface lo
+ router-info area 0.0.0.0
+!
diff --git a/tests/topotests/bfd-ospf-topo1/rt5/zebra.conf b/tests/topotests/bfd-ospf-topo1/rt5/zebra.conf
new file mode 100644
index 0000000000..0b7c9e02f3
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/rt5/zebra.conf
@@ -0,0 +1,22 @@
+log file zebra.log
+!
+hostname rt5
+!
+debug zebra kernel
+debug zebra packet
+!
+interface lo
+ ip address 5.5.5.5/32
+ ipv6 address ::ffff:0505:0505/128
+!
+interface eth-rt2
+ ip address 10.0.3.2/24
+!
+interface eth-rt4
+ ip address 10.0.5.2/24
+!
+ip forwarding
+ipv6 forwarding
+!
+line vty
+!
diff --git a/tests/topotests/bfd-ospf-topo1/test_bfd_ospf_topo1.py b/tests/topotests/bfd-ospf-topo1/test_bfd_ospf_topo1.py
new file mode 100755
index 0000000000..1cec62789b
--- /dev/null
+++ b/tests/topotests/bfd-ospf-topo1/test_bfd_ospf_topo1.py
@@ -0,0 +1,307 @@
+#!/usr/bin/env python
+
+#
+# test_bfd_ospf_topo1.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2020 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_bfd_ospf_topo1.py:
+
+ +---------+
+ | |
+ eth-rt2 (.1) | RT1 | eth-rt3 (.1)
+ +----------+ 1.1.1.1 +----------+
+ | | | |
+ | +---------+ |
+ | |
+ | 10.0.2.0/24 |
+ | |
+ | eth-rt1 | (.2)
+ | 10.0.1.0/24 +----+----+
+ | | |
+ | | RT3 |
+ | | 3.3.3.3 |
+ | | |
+ (.2) | eth-rt1 +----+----+
+ +----+----+ eth-rt4 | (.1)
+ | | |
+ | RT2 | |
+ | 2.2.2.2 | 10.0.4.0/24 |
+ | | |
+ +----+----+ |
+ (.1) | eth-rt5 eth-rt3 | (.2)
+ | +----+----+
+ | | |
+ | | RT4 |
+ | | 4.4.4.4 |
+ | | |
+ | +----+----+
+ | 10.0.3.0/24 eth-rt5 | (.1)
+ | |
+ | |
+ | 10.0.5.0/24 |
+ | |
+ | +---------+ |
+ | | | |
+ +----------+ RT5 +----------+
+ eth-rt2 (.2) | 5.5.5.5 | eth-rt4 (.2)
+ | |
+ +---------+
+
+"""
+
+import os
+import sys
+import pytest
+import json
+import re
+from time import sleep
+from time import time
+from functools import partial
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+
+class TemplateTopo(Topo):
+ "Test topology builder"
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ #
+ # Define FRR Routers
+ #
+ for router in ["rt1", "rt2", "rt3", "rt4", "rt5"]:
+ tgen.add_router(router)
+
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1")
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-rt3")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt1")
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt2")
+
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt4")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt3")
+
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ # For all registered routers, load the zebra configuration file
+ for rname, router in router_list.iteritems():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_OSPF6, os.path.join(CWD, "{}/ospf6d.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ # This function tears down the whole topology.
+ tgen.stop_topology()
+
+
+def print_cmd_result(rname, command):
+ print(get_topogen().gears[rname].vtysh_cmd(command, isjson=False))
+
+
+def router_compare_json_output(rname, command, reference, count=120, wait=0.5):
+ "Compare router JSON output"
+
+ logger.info('Comparing router "%s" "%s" output', rname, command)
+
+ tgen = get_topogen()
+ filename = "{}/{}/{}".format(CWD, rname, reference)
+ expected = json.loads(open(filename).read())
+
+    # Run the test function until we get a result. With the default
+ test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected)
+ _, diff = topotest.run_and_expect(test_func, None, count=count, wait=wait)
+ assertmsg = '"{}" JSON output mismatches the expected result'.format(rname)
+ assert diff is None, assertmsg
+
+
+## TEST STEPS
+
+
+def test_rib_ospf_step1():
+ logger.info("Test (step 1): verify RIB for OSPF")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router_compare_json_output(
+ "rt1", "show ip route ospf json", "step1/show_ip_route.ref"
+ )
+ router_compare_json_output(
+ "rt1", "show ipv6 route ospf json", "step1/show_ipv6_route.ref"
+ )
+
+
+def test_bfd_ospf_sessions_step2():
+ logger.info("Test (step 2): verify BFD peers for OSPF")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+    # BFD is only configured on three routers (rt1, rt2 and rt3)
+ for rt in ["rt1", "rt2", "rt3"]:
+ router_compare_json_output(
+ rt, "show bfd peers json", "step2/show_bfd_peers.ref"
+ )
+
+
+def test_bfd_ospf_interface_failure_rt2_step3():
+ logger.info("Test (step 3): Check failover handling with RT2 down")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Let's kill the interface on rt2 and see what happens with the RIB and BFD on rt1
+ tgen.gears["rt2"].link_enable("eth-rt1", enabled=False)
+
+    # By default BFD detects the failure in about 900ms plus jitter, so wait
+    # an initial 2 seconds to avoid stressing slow CI machines.
+ # TODO: add check for array size
+ sleep(2)
+ router_compare_json_output(
+ "rt1", "show ip route ospf json", "step3/show_ip_route_rt2_down.ref", 1, 0
+ )
+ router_compare_json_output(
+ "rt1", "show ipv6 route ospf json", "step3/show_ipv6_route_rt2_down.ref", 1, 0
+ )
+ router_compare_json_output(
+ "rt1", "show bfd peers json", "step3/show_bfd_peers_rt2_down.ref", 1, 0
+ )
+
+ # Check recovery, this can take some time
+ tgen.gears["rt2"].link_enable("eth-rt1", enabled=True)
+
+ router_compare_json_output(
+ "rt1", "show ip route ospf json", "step3/show_ip_route_healthy.ref"
+ )
+ router_compare_json_output(
+ "rt1", "show ipv6 route ospf json", "step3/show_ipv6_route_healthy.ref"
+ )
+ router_compare_json_output(
+ "rt1", "show bfd peers json", "step3/show_bfd_peers_healthy.ref"
+ )
+
+
+def test_bfd_ospf_interface_failure_rt3_step3():
+ logger.info("Test (step 3): Check failover handling with RT3 down")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Let's kill the interface on rt3 and see what happens with the RIB and BFD on rt1
+ tgen.gears["rt3"].link_enable("eth-rt1", enabled=False)
+
+    # By default BFD detects the failure in about 900ms plus jitter, so wait
+    # an initial 2 seconds to avoid stressing slow CI machines.
+ # TODO: add check for array size
+ sleep(2)
+ router_compare_json_output(
+ "rt1", "show ip route ospf json", "step3/show_ip_route_rt3_down.ref", 1, 0
+ )
+ router_compare_json_output(
+ "rt1", "show ipv6 route ospf json", "step3/show_ipv6_route_rt3_down.ref", 1, 0
+ )
+ router_compare_json_output(
+ "rt1", "show bfd peers json", "step3/show_bfd_peers_rt3_down.ref", 1, 0
+ )
+
+ # Check recovery, this can take some time
+ tgen.gears["rt3"].link_enable("eth-rt1", enabled=True)
+
+ router_compare_json_output(
+ "rt1", "show ip route ospf json", "step3/show_ip_route_healthy.ref"
+ )
+ router_compare_json_output(
+ "rt1", "show ipv6 route ospf json", "step3/show_ipv6_route_healthy.ref"
+ )
+ router_compare_json_output(
+ "rt1", "show bfd peers json", "step3/show_bfd_peers_healthy.ref"
+ )
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp-evpn-mh/test_evpn_mh.py b/tests/topotests/bgp-evpn-mh/test_evpn_mh.py
index 4e37ab00a3..61be947a71 100644
--- a/tests/topotests/bgp-evpn-mh/test_evpn_mh.py
+++ b/tests/topotests/bgp-evpn-mh/test_evpn_mh.py
@@ -35,6 +35,8 @@ import json
import platform
from functools import partial
+pytestmark = pytest.mark.pimd
+
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
@@ -362,7 +364,7 @@ def config_hosts(tgen, hosts):
host = tgen.gears[host_name]
config_host(host_name, host)
-@pytest.mark.pim
+
def setup_module(module):
"Setup topology"
tgen = Topogen(NetworkTopo, module.__name__)
diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/PE1/bgpd.conf b/tests/topotests/bgp-evpn-vxlan_topo1/PE1/bgpd.conf
index c7a76a98ed..991a1e7e56 100644
--- a/tests/topotests/bgp-evpn-vxlan_topo1/PE1/bgpd.conf
+++ b/tests/topotests/bgp-evpn-vxlan_topo1/PE1/bgpd.conf
@@ -8,3 +8,4 @@ router bgp 65000
address-family l2vpn evpn
neighbor 10.30.30.30 activate
advertise-all-vni
+ advertise-svi-ip
diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/PE1/evpn.vni.json b/tests/topotests/bgp-evpn-vxlan_topo1/PE1/evpn.vni.json
index 2937504244..e500a1d85c 100644
--- a/tests/topotests/bgp-evpn-vxlan_topo1/PE1/evpn.vni.json
+++ b/tests/topotests/bgp-evpn-vxlan_topo1/PE1/evpn.vni.json
@@ -6,8 +6,8 @@
"vtepIp":"10.10.10.10",
"mcastGroup":"0.0.0.0",
"advertiseGatewayMacip":"No",
- "numMacs":5,
- "numArpNd":3,
+ "numMacs":6,
+ "numArpNd":6,
"numRemoteVteps":[
"10.30.30.30"
]
diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/PE2/bgpd.conf b/tests/topotests/bgp-evpn-vxlan_topo1/PE2/bgpd.conf
index 0a24158bb8..52f8687bc1 100644
--- a/tests/topotests/bgp-evpn-vxlan_topo1/PE2/bgpd.conf
+++ b/tests/topotests/bgp-evpn-vxlan_topo1/PE2/bgpd.conf
@@ -9,3 +9,4 @@ router bgp 65000
address-family l2vpn evpn
neighbor 10.10.10.10 activate
advertise-all-vni
+ advertise-svi-ip
diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/PE2/evpn.vni.json b/tests/topotests/bgp-evpn-vxlan_topo1/PE2/evpn.vni.json
index 0853147a00..0a56a235bd 100644
--- a/tests/topotests/bgp-evpn-vxlan_topo1/PE2/evpn.vni.json
+++ b/tests/topotests/bgp-evpn-vxlan_topo1/PE2/evpn.vni.json
@@ -6,8 +6,8 @@
"vtepIp":"10.30.30.30",
"mcastGroup":"0.0.0.0",
"advertiseGatewayMacip":"No",
- "numMacs":5,
- "numArpNd":3,
+ "numMacs":6,
+ "numArpNd":6,
"numRemoteVteps":[
"10.10.10.10"
]
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce1/bgpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce1/bgpd.conf
new file mode 100644
index 0000000000..b598666dfb
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce1/bgpd.conf
@@ -0,0 +1,12 @@
+log file /tmp/bgpd.log debugging
+!
+router bgp 65001
+ timers bgp 3 9
+ bgp router-id 192.168.100.10
+ neighbor 192.168.100.20 remote-as 65001
+ neighbor 192.168.100.20 update-source 192.168.100.10
+ !
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce1/snmpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce1/snmpd.conf
new file mode 100644
index 0000000000..36218d3538
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce1/snmpd.conf
@@ -0,0 +1,15 @@
+agentAddress udp:10.5.5.5:161
+
+com2sec public localhost public
+
+group public_group v1 public
+group public_group v2c public
+
+access public_group "" any noauth prefix all all none
+
+view all included .1
+
+iquerySecName frr
+rouser frr
+
+master agentx
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce1/zebra.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce1/zebra.conf
new file mode 100644
index 0000000000..8ad2ddc48c
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce1/zebra.conf
@@ -0,0 +1,19 @@
+log file /tmp/zebra.log
+log stdout
+!
+debug zebra events
+debug zebra dplane
+!
+!
+interface ce1-eth0
+ ip address 192.168.100.10/24
+ ipv6 address 2000:1:1:100::10/64
+!
+!
+interface lo
+ ip address 10.5.5.5/32
+ ipv6 address 2000:5:5:5::5/128
+!
+!
+!
+line vty
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce2/bgpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce2/bgpd.conf
new file mode 100644
index 0000000000..e388ccba8a
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce2/bgpd.conf
@@ -0,0 +1,12 @@
+log file /tmp/bgpd.log debugging
+!
+router bgp 65001
+ bgp router-id 192.168.200.10
+ timers bgp 3 9
+ neighbor 192.168.200.20 remote-as 65001
+ neighbor 192.168.200.20 update-source 192.168.200.10
+ !
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce2/snmpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce2/snmpd.conf
new file mode 100644
index 0000000000..714585cb9b
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce2/snmpd.conf
@@ -0,0 +1,15 @@
+agentAddress udp:10.6.6.6:161
+
+com2sec public localhost public
+
+group public_group v1 public
+group public_group v2c public
+
+access public_group "" any noauth prefix all all none
+
+view all included .1
+
+iquerySecName frr
+rouser frr
+
+master agentx
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce2/zebra.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce2/zebra.conf
new file mode 100644
index 0000000000..fa2e968e55
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce2/zebra.conf
@@ -0,0 +1,19 @@
+log file /tmp/zebra.log
+log stdout
+!
+debug zebra events
+debug zebra dplane
+!
+!
+interface ce2-eth0
+ ip address 192.168.200.10/24
+ ipv6 address 2000:1:1:200::10/64
+!
+!
+interface lo
+ ip address 10.6.6.6/32
+ ipv6 address 2000:6:6:6::6/128
+!
+!
+!
+line vty
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce3/bgpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce3/bgpd.conf
new file mode 100644
index 0000000000..e388ccba8a
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce3/bgpd.conf
@@ -0,0 +1,12 @@
+log file /tmp/bgpd.log debugging
+!
+router bgp 65001
+ bgp router-id 192.168.200.10
+ timers bgp 3 9
+ neighbor 192.168.200.20 remote-as 65001
+ neighbor 192.168.200.20 update-source 192.168.200.10
+ !
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce3/snmpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce3/snmpd.conf
new file mode 100644
index 0000000000..36218d3538
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce3/snmpd.conf
@@ -0,0 +1,15 @@
+agentAddress udp:10.5.5.5:161
+
+com2sec public localhost public
+
+group public_group v1 public
+group public_group v2c public
+
+access public_group "" any noauth prefix all all none
+
+view all included .1
+
+iquerySecName frr
+rouser frr
+
+master agentx
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce3/zebra.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce3/zebra.conf
new file mode 100644
index 0000000000..ea91e21bad
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce3/zebra.conf
@@ -0,0 +1,19 @@
+log file /tmp/zebra.log
+log stdout
+!
+debug zebra events
+debug zebra dplane
+!
+!
+interface ce3-eth0
+ ip address 192.168.200.10/24
+ ipv6 address 2000:1:1:200::10/64
+!
+!
+interface lo
+ ip address 10.7.7.7/32
+ ipv6 address 2000:7:7:7::7/128
+!
+!
+!
+line vty
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce4/bgpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce4/bgpd.conf
new file mode 100644
index 0000000000..e388ccba8a
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce4/bgpd.conf
@@ -0,0 +1,12 @@
+log file /tmp/bgpd.log debugging
+!
+router bgp 65001
+ bgp router-id 192.168.200.10
+ timers bgp 3 9
+ neighbor 192.168.200.20 remote-as 65001
+ neighbor 192.168.200.20 update-source 192.168.200.10
+ !
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce4/snmpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce4/snmpd.conf
new file mode 100644
index 0000000000..36218d3538
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce4/snmpd.conf
@@ -0,0 +1,15 @@
+agentAddress udp:10.5.5.5:161
+
+com2sec public localhost public
+
+group public_group v1 public
+group public_group v2c public
+
+access public_group "" any noauth prefix all all none
+
+view all included .1
+
+iquerySecName frr
+rouser frr
+
+master agentx
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce4/zebra.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce4/zebra.conf
new file mode 100644
index 0000000000..0866fa9759
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce4/zebra.conf
@@ -0,0 +1,19 @@
+log file /tmp/zebra.log
+log stdout
+!
+debug zebra events
+debug zebra dplane
+!
+!
+interface ce4-eth0
+ ip address 192.168.34.10/24
+ ipv6 address 2000:1:1:300::10/64
+!
+!
+interface lo
+ ip address 10.8.8.8/32
+ ipv6 address 2000:8:8:8::8/128
+!
+!
+!
+line vty
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r1/bgpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r1/bgpd.conf
new file mode 100644
index 0000000000..098e55d0ed
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/r1/bgpd.conf
@@ -0,0 +1,48 @@
+log file /tmp/bgpd.log debugging
+!
+router bgp 65000
+ bgp router-id 10.1.1.1
+ neighbor 10.4.4.4 remote-as 65000
+ neighbor 10.4.4.4 update-source 10.1.1.1
+ neighbor 10.4.4.4 timers connect 10
+ !
+ address-family ipv4 vpn
+ neighbor 10.4.4.4 activate
+ exit-address-family
+
+!
+router bgp 65001 vrf VRF-a
+ bgp router-id 192.168.100.20
+ timers bgp 3 9
+ neighbor 192.168.100.10 remote-as 65001
+ neighbor 192.168.100.10 update-source 192.168.100.20
+ neighbor 192.168.200.10 remote-as 65001
+ neighbor 192.168.200.10 update-source 192.168.200.20
+ !
+ address-family ipv4 unicast
+ redistribute connected
+ redistribute isis
+ label vpn export 1111
+ rd vpn export 10:1
+ rt vpn both 1:1
+ export vpn
+ import vpn
+ exit-address-family
+
+router bgp 65002 vrf VRF-b
+ bgp router-id 192.168.10.20
+ timers bgp 3 9
+ neighbor 192.168.10.10 remote-as 65003
+ neighbor 192.168.10.10 update-source 192.168.10.20
+!
+ address-family ipv4 unicast
+ redistribute connected
+ redistribute isis
+ label vpn export 6666
+ rd vpn export 10:2
+ rt vpn both 1:2
+ export vpn
+ import vpn
+ exit-address-family
+
+agentx
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r1/isisd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r1/isisd.conf
new file mode 100644
index 0000000000..b5ca993da3
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/r1/isisd.conf
@@ -0,0 +1,46 @@
+log stdout debugging
+!
+debug isis route-events
+debug isis events
+!
+interface r1-eth0
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ no isis hello padding
+ isis hello-interval 1
+ isis hello-multiplier 3
+ isis network point-to-point
+!
+interface r1-eth1
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ no isis hello padding
+ isis hello-interval 1
+ isis hello-multiplier 3
+ isis network point-to-point
+!
+interface r1-eth2
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ no isis hello padding
+ isis hello-interval 1
+ isis hello-multiplier 3
+ isis network point-to-point
+!
+interface lo
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ isis passive
+ no isis hello padding
+!
+router isis ISIS1
+ net 01.1111.0000.0000.0001.00
+ is-type level-1
+ topology ipv6-unicast
+!
+line vty
+!
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r1/snmpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r1/snmpd.conf
new file mode 100644
index 0000000000..c903c1ad2e
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/r1/snmpd.conf
@@ -0,0 +1,17 @@
+agentAddress udp:10.1.1.1:161
+
+com2sec public 10.1.1.1 public
+
+group public_group v1 public
+group public_group v2c public
+
+access public_group "" any noauth prefix all all none
+
+view all included .1
+
+iquerySecName frr
+rouser frr
+
+master agentx
+
+noRangeCheck yes \ No newline at end of file
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r1/zebra.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r1/zebra.conf
new file mode 100644
index 0000000000..7228ae6bd2
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/r1/zebra.conf
@@ -0,0 +1,33 @@
+log file zebra.log
+!
+interface r1-eth0
+ ip address 192.168.12.12/24
+ ipv6 address 2000:1:1:12::12/64
+!
+interface r1-eth1
+ ip address 192.168.13.13/24
+ ipv6 address 2000:1:1:13::13/64
+!
+interface r1-eth2
+ ip address 192.168.14.14/24
+ ipv6 address 2000:1:1:14::14/64
+!
+interface r1-eth3 vrf VRF-a
+ ip address 192.168.100.20/24
+ ipv6 address 2000:1:1:100::20/64
+!
+interface r1-eth4 vrf VRF-a
+ ip address 192.168.200.20/24
+ ipv6 address 2000:1:1:200::20/64
+!
+interface r1-eth5 vrf VRF-b
+ ip address 192.168.300.20/24
+ ipv6 address 2000:1:1:300::20/64
+
+interface lo
+ ip address 10.1.1.1/32
+ ipv6 address 2000:1:1:1::1/128
+!
+!
+!
+line vty
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r2/isisd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r2/isisd.conf
new file mode 100644
index 0000000000..3dfa43831a
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/r2/isisd.conf
@@ -0,0 +1,37 @@
+log stdout debugging
+!
+debug isis route-events
+debug isis events
+!
+interface r2-eth0
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ no isis hello padding
+ isis hello-interval 1
+ isis hello-multiplier 3
+ isis network point-to-point
+!
+interface r2-eth1
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ no isis hello padding
+ isis hello-interval 1
+ isis hello-multiplier 3
+ isis network point-to-point
+!
+interface lo
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ isis passive
+ no isis hello padding
+!
+router isis ISIS1
+ net 01.1111.0000.0000.0002.00
+ is-type level-1
+ topology ipv6-unicast
+!
+line vty
+!
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r2/snmpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r2/snmpd.conf
new file mode 100644
index 0000000000..0cfebc7238
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/r2/snmpd.conf
@@ -0,0 +1,15 @@
+agentAddress udp:10.2.2.2:161
+
+com2sec public localhost public
+
+group public_group v1 public
+group public_group v2c public
+
+access public_group "" any noauth prefix all all none
+
+view all included .1
+
+iquerySecName frr
+rouser frr
+
+master agentx
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r2/zebra.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r2/zebra.conf
new file mode 100644
index 0000000000..9bc4331bae
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/r2/zebra.conf
@@ -0,0 +1,24 @@
+log file /tmp/zebra.log
+log stdout
+!
+debug zebra events
+debug zebra dplane
+!
+!
+interface r2-eth0
+ ip address 192.168.12.21/24
+ ipv6 address 2000:1:1:12::21/64
+!
+interface r2-eth1
+ ip address 192.168.23.23/24
+ ipv6 address 2000:1:1:23::23/64
+!
+!
+interface lo
+ ip address 10.2.2.2/32
+ ipv6 address 2000:2:2:2::2/128
+!
+!
+!
+line vty
+!
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r3/isisd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r3/isisd.conf
new file mode 100644
index 0000000000..578ebafad6
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/r3/isisd.conf
@@ -0,0 +1,45 @@
+log stdout debugging
+!
+debug isis route-events
+debug isis events
+!
+interface r3-eth0
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ no isis hello padding
+ isis hello-interval 1
+ isis hello-multiplier 3
+ isis network point-to-point
+!
+interface r3-eth1
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ no isis hello padding
+ isis hello-interval 1
+ isis hello-multiplier 3
+ isis network point-to-point
+!
+interface r3-eth2
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ no isis hello padding
+ isis hello-interval 1
+ isis hello-multiplier 3
+ isis network point-to-point
+!
+interface lo
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ isis passive
+ no isis hello padding
+!
+router isis ISIS1
+ net 01.1111.0000.0000.0003.00
+ is-type level-1
+ topology ipv6-unicast
+!
+line vty
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r3/snmpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r3/snmpd.conf
new file mode 100644
index 0000000000..b9eb00ea52
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/r3/snmpd.conf
@@ -0,0 +1,15 @@
+agentAddress udp:10.3.3.3:161
+
+com2sec public localhost public
+
+group public_group v1 public
+group public_group v2c public
+
+access public_group "" any noauth prefix all all none
+
+view all included .1
+
+iquerySecName frr
+rouser frr
+
+master agentx
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r3/zebra.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r3/zebra.conf
new file mode 100644
index 0000000000..4d2007e787
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/r3/zebra.conf
@@ -0,0 +1,27 @@
+log file /tmp/zebra.log
+log stdout
+!
+debug zebra events
+debug zebra dplane
+!
+!
+interface r3-eth0
+ ip address 192.168.13.31/24
+ ipv6 address 2000:1:1:13::31/64
+!
+interface r3-eth1
+ ip address 192.168.23.32/24
+ ipv6 address 2000:1:1:23::32/64
+!
+interface r3-eth2
+ ip address 192.168.34.34/24
+ ipv6 address 2000:1:1:34::34/64
+!
+!
+interface lo
+ ip address 10.3.3.3/32
+ ipv6 address 2000:3:3:3::3/128
+!
+!
+!
+line vty
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r4/bgpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r4/bgpd.conf
new file mode 100644
index 0000000000..2a834c799e
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/r4/bgpd.conf
@@ -0,0 +1,43 @@
+log file /tmp/bgpd.log debugging
+!
+router bgp 65000
+ bgp router-id 10.4.4.4
+ timers bgp 3 9
+ neighbor 10.1.1.1 remote-as 65000
+ neighbor 10.1.1.1 update-source 10.4.4.4
+ neighbor 10.1.1.1 timers connect 10
+ !
+ address-family ipv4 vpn
+ neighbor 10.1.1.1 activate
+ exit-address-family
+!
+
+ address-family ipv6 vpn
+ neighbor 10.1.1.1 activate
+ exit-address-family
+!
+router bgp 65001 vrf VRF-a
+ bgp router-id 192.168.200.20
+ timers bgp 3 9
+ neighbor 192.168.200.10 remote-as 65001
+ neighbor 192.168.200.10 update-source 192.168.200.20
+ !
+ address-family ipv4 unicast
+ redistribute connected
+ redistribute isis
+ label vpn export 1111
+ rd vpn export 10:3
+ rt vpn both 1:1
+ export vpn
+ import vpn
+ exit-address-family
+
+ address-family ipv6 unicast
+ redistribute connected
+ redistribute isis
+ label vpn export 1111
+ rd vpn export 10:3
+ rt vpn both 1:2
+ export vpn
+ import vpn
+ exit-address-family
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r4/isisd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r4/isisd.conf
new file mode 100644
index 0000000000..3e9e9af45f
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/r4/isisd.conf
@@ -0,0 +1,36 @@
+log stdout debugging
+!
+debug isis route-events
+debug isis events
+!
+interface r4-eth0
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ no isis hello padding
+ isis hello-interval 1
+ isis hello-multiplier 3
+ isis network point-to-point
+!
+interface r4-eth1
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ no isis hello padding
+ isis hello-interval 1
+ isis hello-multiplier 3
+ isis network point-to-point
+!
+interface lo
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ isis passive
+ no isis hello padding
+!
+router isis ISIS1
+ net 01.1111.0000.0000.0004.00
+ is-type level-1
+ topology ipv6-unicast
+!
+line vty
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r4/snmpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r4/snmpd.conf
new file mode 100644
index 0000000000..ec35f9f9c9
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/r4/snmpd.conf
@@ -0,0 +1,15 @@
+agentAddress udp:10.4.4.4:161
+
+com2sec public localhost public
+
+group public_group v1 public
+group public_group v2c public
+
+access public_group "" any noauth prefix all all none
+
+view all included .1
+
+iquerySecName frr
+rouser frr
+
+master agentx
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r4/zebra.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r4/zebra.conf
new file mode 100644
index 0000000000..c48407c108
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/r4/zebra.conf
@@ -0,0 +1,27 @@
+log file /tmp/zebra.log
+log stdout
+!
+debug zebra events
+debug zebra dplane
+!
+!
+interface r4-eth0
+ ip address 192.168.14.41/24
+ ipv6 address 2000:1:1:14::41/64
+!
+interface r4-eth1
+ ip address 192.168.34.43/24
+ ipv6 address 2000:1:1:34::43/64
+!
+interface r4-eth2 vrf aaa
+ ip address 192.168.200.20/24
+ ipv6 address 2000:1:1:200::20/64
+!
+!
+interface lo
+ ip address 10.4.4.4/32
+ ipv6 address 2000:4:4:4::4/128
+!
+!
+line vty
+!
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/test_bgp_snmp_mplsvpn.py b/tests/topotests/bgp-snmp-mplsl3vpn/test_bgp_snmp_mplsvpn.py
new file mode 100755
index 0000000000..5eb1738632
--- /dev/null
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/test_bgp_snmp_mplsvpn.py
@@ -0,0 +1,738 @@
+#!/usr/bin/env python
+
+#
+# test_bgp_snmp_mplsvpn.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2020 by Volta Networks
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_bgp_snmp_mplsvpn.py: Test mplsL3Vpn MIB [RFC4382].
+"""
+
+import os
+import sys
+import json
+from functools import partial
+from time import sleep
+import pytest
+import re
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from lib.snmptest import SnmpTester
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+
+class TemplateTopo(Topo):
+ "Test topology builder"
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ # This function only purpose is to define allocation and relationship
+ # between routers, switches and hosts.
+ #
+ #
+ # Create routers
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_router("r3")
+ tgen.add_router("r4")
+ tgen.add_router("ce1")
+ tgen.add_router("ce2")
+ tgen.add_router("ce3")
+ tgen.add_router("ce4")
+
+ # r1-r2
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ # r1-r3
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
+
+ # r1-r4
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r4"])
+
+ # r1-ce1
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["ce1"])
+
+ # r1-ce3
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["ce3"])
+
+ # r1-ce4
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["ce4"])
+
+ # r1-dangling
+ switch = tgen.add_switch("s7")
+ switch.add_link(tgen.gears["r1"])
+
+ # r2-r3
+ switch = tgen.add_switch("s8")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+
+ # r3-r4
+ switch = tgen.add_switch("s9")
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
+
+ # r4-ce2
+ switch = tgen.add_switch("s10")
+ switch.add_link(tgen.gears["r4"])
+ switch.add_link(tgen.gears["ce2"])
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+
+    # skip tests if SNMP not installed
+ snmpd = os.system("which snmpd")
+ if snmpd:
+ error_msg = "SNMP not installed - skipping"
+ pytest.skip(error_msg)
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+ tgen.start_topology()
+
+ r1 = tgen.gears["r1"]
+ r2 = tgen.gears["r2"]
+ r3 = tgen.gears["r3"]
+ r4 = tgen.gears["r4"]
+
+ # setup VRF-a in r1
+ r1.run("ip link add VRF-a type vrf table 1001")
+ r1.run("ip link set up dev VRF-a")
+ r1.run("ip link add VRF-b type vrf table 1002")
+ r1.run("ip link set up dev VRF-b")
+ r4.run("ip link add VRF-a type vrf table 1001")
+ r4.run("ip link set up dev VRF-a")
+
+ # enslave vrf interfaces
+ r1.run("ip link set r1-eth3 master VRF-a")
+ r1.run("ip link set r1-eth4 master VRF-a")
+ r1.run("ip link set r1-eth5 master VRF-b")
+ r4.run("ip link set r4-eth1 master VRF-a")
+
+ r1.run("sysctl -w net.ipv4.ip_forward=1")
+ r2.run("sysctl -w net.ipv4.ip_forward=1")
+ r3.run("sysctl -w net.ipv4.ip_forward=1")
+ r4.run("sysctl -w net.ipv4.ip_forward=1")
+ r1.run("sysctl -w net.mpls.conf.r1-eth0.input=1")
+ r1.run("sysctl -w net.mpls.conf.r1-eth1.input=1")
+ r1.run("sysctl -w net.mpls.conf.r1-eth2.input=1")
+    r2.run("sysctl -w net.mpls.conf.r2-eth0.input=1")
+    r2.run("sysctl -w net.mpls.conf.r2-eth1.input=1")
+    r3.run("sysctl -w net.mpls.conf.r3-eth0.input=1")
+    r3.run("sysctl -w net.mpls.conf.r3-eth1.input=1")
+    r3.run("sysctl -w net.mpls.conf.r3-eth2.input=1")
+    r4.run("sysctl -w net.mpls.conf.r4-eth0.input=1")
+    r4.run("sysctl -w net.mpls.conf.r4-eth1.input=1")
+
+ router_list = tgen.routers()
+
+    # For all registered routers, load the zebra configuration file
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP,
+ os.path.join(CWD, "{}/bgpd.conf".format(rname)),
+ "-M snmp",
+ )
+ router.load_config(
+ TopoRouter.RD_SNMP,
+ os.path.join(CWD, "{}/snmpd.conf".format(rname)),
+ "-Le -Ivacm_conf,usmConf,iquery -V -DAgentX,trap",
+ )
+
+ # After loading the configurations, this function loads configured daemons.
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ # This function tears down the whole topology.
+ tgen.stop_topology()
+
+
+# SNMP utilities - maybe move to lib
+def snmp_uint32_to_oid(val):
+ oid1 = int(val / 16777216) % 256
+ oid2 = int(val / 65536) % 256
+ oid3 = int(val / 256) % 256
+ oid4 = int(val) % 256
+ return "%(oid1)s.%(oid2)s.%(oid3)s.%(oid4)s" % locals()
+
+
+def snmp_oid_to_uint32(oid):
+ values = oid.split(".")
+ return (
+ (int(values[0]) * 16777216)
+ + (int(values[1]) * 65536)
+ + (int(values[2]) * 256)
+ + int(values[3])
+ )
+
+
+def snmp_str_to_oid(str):
+ out_oid = ""
+ for char in str:
+ out_oid += "{}.".format(ord(char))
+ return out_oid.rstrip(".")
+
+
+def snmp_oid_to_str(oid):
+ out_str = ""
+ oids = oid.split(".")
+ for char in oids:
+ out_str += "{}".format(chr(int(char)))
+ return out_str
+
+
+def snmp_rte_oid(vrf, dtype, dest, plen, policy, ntype, nhop=0):
+ oid_1 = snmp_str_to_oid(vrf)
+ oid_2 = dtype
+ oid_3 = dest
+ oid_4 = plen
+ oid_5 = "0.{}".format(policy)
+ oid_6 = ntype
+ if ntype == 0:
+ oid_7 = ""
+ else:
+ oid_7 = ".{}".format(nhop)
+
+ return "{}.{}.{}.{}.{}.{}{}".format(oid_1, oid_2, oid_3, oid_4, oid_5, oid_6, oid_7)
+
+
+def test_pe1_converge_evpn():
+ "Wait for protocol convergence"
+ tgen = get_topogen()
+
+ r1 = tgen.net.get("r1")
+ r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
+
+ assertmsg = "BGP SNMP does not seem to be running"
+ assert r1_snmp.test_oid("bgpVersion", "10"), assertmsg
+ count = 0
+ passed = False
+ while count < 125:
+ if r1_snmp.test_oid_walk("bgpPeerLocalAddr.10.4.4.4", ["10.1.1.1"]):
+ passed = True
+ break
+ count += 1
+ sleep(1)
+ #tgen.mininet_cli()
+ assertmsg = "BGP Peer 10.4.4.4 did not connect"
+ assert passed, assertmsg
+
+
+interfaces_up_test = {
+ "mplsL3VpnConfiguredVrfs": "2",
+ "mplsL3VpnActiveVrfs": "2",
+ "mplsL3VpnConnectedInterfaces": "3",
+ "mplsL3VpnNotificationEnable": "true(1)",
+ "mplsL3VpnVrfConfMaxPossRts": "0",
+ "mplsL3VpnVrfConfRteMxThrshTime": "0 seconds",
+ "mplsL3VpnIlllblRcvThrsh": "0",
+}
+
+interfaces_down_test = {
+ "mplsL3VpnConfiguredVrfs": "2",
+ "mplsL3VpnActiveVrfs": "1",
+ "mplsL3VpnConnectedInterfaces": "3",
+ "mplsL3VpnNotificationEnable": "true(1)",
+ "mplsL3VpnVrfConfMaxPossRts": "0",
+ "mplsL3VpnVrfConfRteMxThrshTime": "0 seconds",
+ "mplsL3VpnIlllblRcvThrsh": "0",
+}
+
+
+def test_r1_mplsvpn_scalars():
+ "check scalar values"
+ tgen = get_topogen()
+ r1 = tgen.net.get("r1")
+ r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
+
+ for item in interfaces_up_test.keys():
+ assertmsg = "{} should be {}: value {}".format(
+ item, interfaces_up_test[item], r1_snmp.get_next(item)
+ )
+ assert r1_snmp.test_oid(item, interfaces_up_test[item]), assertmsg
+
+
+def test_r1_mplsvpn_scalars_interface():
+ "check scalar interface changing values"
+ tgen = get_topogen()
+ r1 = tgen.net.get("r1")
+ r1_cmd = tgen.gears["r1"]
+ r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
+
+ r1_cmd.vtysh_cmd("conf t\ninterface r1-eth3\nshutdown")
+ r1_cmd.vtysh_cmd("conf t\ninterface r1-eth4\nshutdown")
+
+    for item in interfaces_down_test.keys():
+ assertmsg = "{} should be {}: value {}".format(
+ item, interfaces_down_test[item], r1_snmp.get_next(item)
+ )
+ assert r1_snmp.test_oid(item, interfaces_down_test[item]), assertmsg
+
+ r1_cmd.vtysh_cmd("conf t\ninterface r1-eth3\nno shutdown")
+ r1_cmd.vtysh_cmd("conf t\ninterface r1-eth4\nno shutdown")
+
+ for item in interfaces_up_test.keys():
+ assertmsg = "{} should be {}: value {}".format(
+ item, interfaces_up_test[item], r1_snmp.get_next(item)
+ )
+ assert r1_snmp.test_oid(item, interfaces_up_test[item]), assertmsg
+
+
+def router_interface_get_ifindex(router, interface):
+ ifindex = 0
+ r_int_output = router.vtysh_cmd(
+ "show interface {}-{}".format(router.name, interface)
+ )
+ int_lines = r_int_output.splitlines()
+ for line in int_lines:
+ line_items = line.lstrip().split(" ")
+ if "index" in line_items[0]:
+ ifindex = line_items[1]
+ return ifindex
+
+
+def generate_vrf_ifindex_oid(vrf, ifindex):
+
+ intoid = snmp_uint32_to_oid(int(ifindex))
+ vrfoid = snmp_str_to_oid(vrf)
+ oid = "{}.{}".format(vrfoid, intoid)
+
+ return oid
+
+
+def generate_vrf_index_type_oid(vrf, index, type):
+ vrfoid = snmp_str_to_oid(vrf)
+ intoid = snmp_uint32_to_oid(int(index))
+ oid = "{}.{}.{}".format(vrfoid, intoid, type)
+
+ return oid
+
+
+iftable_up_test = {
+ "mplsL3VpnIfVpnClassification": ["enterprise(2)", "enterprise(2)", "enterprise(2)"],
+ "mplsL3VpnIfConfStorageType": ["volatile(2)", "volatile(2)", "volatile(2)"],
+ "mplsL3VpnIfConfRowStatus": ["active(1)", "active(1)", "active(1)"],
+}
+
+
+def get_timetick_val(time):
+ return int(time.split(" ")[0].lstrip("(").rstrip(")"))
+
+
+def test_r1_mplsvpn_IfTable():
+ "mplsL3VpnIf table values"
+
+ tgen = get_topogen()
+ r1 = tgen.net.get("r1")
+ r1r = tgen.gears["r1"]
+
+ r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
+
+ # tgen.mininet_cli()
+ eth3_ifindex = router_interface_get_ifindex(r1r, "eth3")
+ eth4_ifindex = router_interface_get_ifindex(r1r, "eth4")
+ eth5_ifindex = router_interface_get_ifindex(r1r, "eth5")
+
+ # get ifindex and make sure the oid is correct
+
+ oids = []
+ # generate oid
+ oids.append(generate_vrf_ifindex_oid("VRF-a", eth3_ifindex))
+ oids.append(generate_vrf_ifindex_oid("VRF-a", eth4_ifindex))
+ oids.append(generate_vrf_ifindex_oid("VRF-b", eth5_ifindex))
+
+ for item in iftable_up_test.keys():
+ assertmsg = "{} should be {} oids {} full dict {}:".format(
+ item, iftable_up_test[item], oids, r1_snmp.walk(item)
+ )
+ assert r1_snmp.test_oid_walk(item, iftable_up_test[item], oids), assertmsg
+
+ # an inactive vrf should not affect these values
+ r1.cmd("ip link set r1-eth5 down")
+
+ for item in iftable_up_test.keys():
+ assertmsg = "{} should be {} oids {} full dict {}:".format(
+ item, iftable_up_test[item], oids, r1_snmp.walk(item)
+ )
+ assert r1_snmp.test_oid_walk(item, iftable_up_test[item], oids), assertmsg
+
+ r1.cmd("ip link set r1-eth5 up")
+
+
+vrftable_test = {
+ "mplsL3VpnVrfDescription": ["VRF-a", "VRF-b"],
+ "mplsL3VpnVrfRD": ['"10:1"', '"10:2"'],
+ "mplsL3VpnVrfOperStatus": ["up(1)", "up(1)"],
+ "mplsL3VpnVrfActiveInterfaces": ["2", "1"],
+ "mplsL3VpnVrfAssociatedInterfaces": ["2", "1"],
+ "mplsL3VpnVrfConfMidRteThresh": ["0", "0"],
+ "mplsL3VpnVrfConfHighRteThresh": ["0", "0"],
+ "mplsL3VpnVrfConfMaxRoutes": ["0", "0"],
+ "mplsL3VpnVrfConfRowStatus": ["active(1)", "active(1)"],
+ "mplsL3VpnVrfConfAdminStatus": ["up(1)", "up(1)"],
+ "mplsL3VpnVrfConfStorageType": ["volatile(2)", "volatile(2)"],
+}
+
+
+def test_r1_mplsvpn_VrfTable():
+ tgen = get_topogen()
+
+ r1 = tgen.net.get("r1")
+ r1r = tgen.gears["r1"]
+
+ r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
+
+ # tgen.mininet_cli()
+
+ oids = []
+
+ oids.append(snmp_str_to_oid("VRF-a"))
+ oids.append(snmp_str_to_oid("VRF-b"))
+
+ # check items
+ for item in vrftable_test.keys():
+ assertmsg = "{} should be {} oids {} full dict {}:".format(
+ item, vrftable_test[item], oids, r1_snmp.walk(item)
+ )
+ assert r1_snmp.test_oid_walk(item, vrftable_test[item], oids), assertmsg
+
+ # check timetick set and stable
+ ts_a = r1_snmp.get("mplsL3VpnVrfCreationTime.{}".format(snmp_str_to_oid("VRF-a")))
+ ts_b = r1_snmp.get("mplsL3VpnVrfCreationTime.{}".format(snmp_str_to_oid("VRF-b")))
+ ts_val_a1 = get_timetick_val(ts_a)
+ ts_val_b1 = get_timetick_val(ts_b)
+ ts_a = r1_snmp.get("mplsL3VpnVrfCreationTime.{}".format(snmp_str_to_oid("VRF-a")))
+ ts_b = r1_snmp.get("mplsL3VpnVrfCreationTime.{}".format(snmp_str_to_oid("VRF-b")))
+ ts_val_a2 = get_timetick_val(ts_a)
+ ts_val_b2 = get_timetick_val(ts_b)
+
+ assertmsg = "timestamp values for VRF-a do not match {} {}".format(
+ ts_val_a1, ts_val_a2
+ )
+ assert ts_val_a1 == ts_val_a2, assertmsg
+ assertmsg = "timestamp values for VRF-b do not match {} {}".format(
+ ts_val_b1, ts_val_b2
+ )
+ assert ts_val_b1 == ts_val_b2, assertmsg
+
+ # take Last changed time, fiddle with active interfaces, ensure
+ # time changes and active interfaces change
+ ts_last = r1_snmp.get(
+ "mplsL3VpnVrfConfLastChanged.{}".format(snmp_str_to_oid("VRF-a"))
+ )
+ ts_val_last_1 = get_timetick_val(ts_last)
+ r1r.vtysh_cmd("conf t\ninterface r1-eth3\nshutdown")
+ active_int = r1_snmp.get(
+ "mplsL3VpnVrfActiveInterfaces.{}".format(snmp_str_to_oid("VRF-a"))
+ )
+ assertmsg = "mplsL3VpnVrfActiveInterfaces incorrect should be 1 value {}".format(
+ active_int
+ )
+ assert active_int == "1", assertmsg
+
+ ts_last = r1_snmp.get(
+ "mplsL3VpnVrfConfLastChanged.{}".format(snmp_str_to_oid("VRF-a"))
+ )
+ ts_val_last_2 = get_timetick_val(ts_last)
+ assertmsg = "mplsL3VpnVrfConfLastChanged does not update on interface change"
+ assert ts_val_last_2 > ts_val_last_1, assertmsg
+ r1r.vtysh_cmd("conf t\ninterface r1-eth3\nno shutdown")
+
+ # take Last changed time, fiddle with associated interfaces, ensure
+ # time changes and active interfaces change
+ ts_last = r1_snmp.get(
+ "mplsL3VpnVrfConfLastChanged.{}".format(snmp_str_to_oid("VRF-a"))
+ )
+ ts_val_last_1 = get_timetick_val(ts_last)
+ r1.cmd("ip link set r1-eth6 master VRF-a")
+ r1.cmd("ip link set r1-eth6 up")
+
+ associated_int = r1_snmp.get(
+ "mplsL3VpnVrfAssociatedInterfaces.{}".format(snmp_str_to_oid("VRF-a"))
+ )
+ assertmsg = "mplsL3VpnVrfAssociatedInterfaces incorrect should be 3 value {}".format(
+ associated_int
+ )
+
+ assert associated_int == "3", assertmsg
+ ts_last = r1_snmp.get(
+ "mplsL3VpnVrfConfLastChanged.{}".format(snmp_str_to_oid("VRF-a"))
+ )
+ ts_val_last_2 = get_timetick_val(ts_last)
+ assertmsg = "mplsL3VpnVrfConfLastChanged does not update on interface change"
+ assert ts_val_last_2 > ts_val_last_1, assertmsg
+    r1.cmd("ip link set r1-eth6 nomaster")
+ r1.cmd("ip link set r1-eth6 down")
+
+
+rt_table_test = {
+ "mplsL3VpnVrfRT": ['"1:1"', '"1:2"'],
+ "mplsL3VpnVrfRTDescr": ["RT both for VRF VRF-a", "RT both for VRF VRF-b"],
+ "mplsL3VpnVrfRTRowStatus": ["active(1)", "active(1)"],
+ "mplsL3VpnVrfRTStorageType": ["volatile(2)", "volatile(2)"],
+}
+
+
+def test_r1_mplsvpn_VrfRT_table():
+ tgen = get_topogen()
+
+ r1 = tgen.net.get("r1")
+ r1r = tgen.gears["r1"]
+
+ r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
+
+ oids = []
+ oids.append(generate_vrf_index_type_oid("VRF-a", 1, 3))
+ oids.append(generate_vrf_index_type_oid("VRF-b", 1, 3))
+
+ # check items
+ for item in rt_table_test.keys():
+ print(item)
+ assertmsg = "{} should be {} oids {} full dict {}:".format(
+ item, rt_table_test[item], oids, r1_snmp.walk(item)
+ )
+ assert r1_snmp.test_oid_walk(item, rt_table_test[item], oids), assertmsg
+
+
+def test_r1_mplsvpn_perf_table():
+ tgen = get_topogen()
+
+ r1 = tgen.net.get("r1")
+ r1r = tgen.gears["r1"]
+
+ r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
+
+ # tgen.mininet_cli()
+ oid_a = snmp_str_to_oid("VRF-a")
+ oid_b = snmp_str_to_oid("VRF-b")
+
+    # poll for up to 60 seconds for routes to appear
+ count = 0
+ passed = False
+ while count < 60:
+ if r1_snmp.test_oid_walk(
+ "mplsL3VpnVrfPerfCurrNumRoutes.{}".format(oid_a), ["7"]
+ ):
+ passed = True
+ break
+ count += 1
+ sleep(1)
+ # tgen.mininet_cli()
+    assertmsg = "mplsL3VpnVrfPerfCurrNumRoutes should be 7 got {}".format(
+ r1_snmp.get("mplsL3VpnVrfPerfCurrNumRoutes.{}".format(oid_a))
+ )
+ assert passed, assertmsg
+ curr_a = int(r1_snmp.get("mplsL3VpnVrfPerfCurrNumRoutes.{}".format(oid_a)))
+ del_a = int(r1_snmp.get("mplsL3VpnVrfPerfRoutesDeleted.{}".format(oid_a)))
+ add_a = int(r1_snmp.get("mplsL3VpnVrfPerfRoutesAdded.{}".format(oid_a)))
+
+ assertmsg = "FAIL curr{} does not equal added{} - deleted {}".format(
+ curr_a, add_a, del_a
+ )
+ assert curr_a == (add_a - del_a), assertmsg
+ curr_b = int(r1_snmp.get("mplsL3VpnVrfPerfCurrNumRoutes.{}".format(oid_b)))
+ del_b = int(r1_snmp.get("mplsL3VpnVrfPerfRoutesDeleted.{}".format(oid_b)))
+ add_b = int(r1_snmp.get("mplsL3VpnVrfPerfRoutesAdded.{}".format(oid_b)))
+ assertmsg = "FAIL curr{} does not equal added{} - deleted {}".format(
+ curr_b, add_b, del_b
+ )
+ assert curr_b == (add_b - del_b), assertmsg
+
+
+rte_table_test = {
+ "mplsL3VpnVrfRteInetCidrDestType": [
+ "ipv4(1)",
+ "ipv4(1)",
+ "ipv4(1)",
+ "ipv4(1)",
+ "ipv4(1)",
+ "ipv4(1)",
+ "ipv4(1)",
+ ],
+ "mplsL3VpnVrfRteInetCidrDest": [
+ "0A 05 05 05",
+ "0A 07 07 07",
+ "C0 A8 22 00",
+ "C0 A8 64 00",
+ "C0 A8 64 00",
+ "C0 A8 C8 00",
+ "C0 A8 C8 00",
+ ],
+ "mplsL3VpnVrfRteInetCidrPfxLen": ["32", "32", "24", "24", "24", "24", "24"],
+ "mplsL3VpnVrfRteInetCidrNHopType": [
+ "ipv4(1)",
+ "ipv4(1)",
+ "ipv4(1)",
+ "ipv4(1)",
+ "unknown(0)",
+ "ipv4(1)",
+ "unknown(0)",
+ ],
+ "mplsL3VpnVrfRteInetCidrNextHop": [
+ "C0 A8 64 0A",
+ "C0 A8 C8 0A",
+ "0A 04 04 04",
+ "C0 A8 64 0A",
+ '""',
+ "C0 A8 C8 0A",
+ '""',
+ ],
+ "mplsL3VpnVrfRteInetCidrIfIndex": ["5", "6", "4", "5", "0", "6", "0"],
+ "mplsL3VpnVrfRteInetCidrType": [
+ "local(3)",
+ "local(3)",
+ "remote(4)",
+ "local(3)",
+ "other(1)",
+ "local(3)",
+ "other(1)",
+ ],
+ "mplsL3VpnVrfRteInetCidrProto": [
+ "bgp(14)",
+ "bgp(14)",
+ "bgp(14)",
+ "bgp(14)",
+ "local(2)",
+ "bgp(14)",
+ "local(2)",
+ ],
+ "mplsL3VpnVrfRteInetCidrNextHopAS": ["65001", "65001", "0", "65001", "0", "65001", "0"],
+ "mplsL3VpnVrfRteInetCidrMetric1": ["0", "0", "20", "0", "0", "0", "0"],
+ "mplsL3VpnVrfRteInetCidrMetric2": ["-1", "-1", "-1", "-1", "-1", "-1", "-1"],
+ "mplsL3VpnVrfRteInetCidrMetric3": ["-1", "-1", "-1", "-1", "-1", "-1", "-1"],
+ "mplsL3VpnVrfRteInetCidrMetric4": ["-1", "-1", "-1", "-1", "-1", "-1", "-1"],
+ "mplsL3VpnVrfRteInetCidrMetric5": ["-1", "-1", "-1", "-1", "-1", "-1", "-1"],
+ "mplsL3VpnVrfRteXCPointer": ["00", "00", "00", "00", "00", "00", "00"],
+ "mplsL3VpnVrfRteInetCidrStatus": [
+ "active(1)",
+ "active(1)",
+ "active(1)",
+ "active(1)",
+ "active(1)",
+ "active(1)",
+ "active(1)",
+ ],
+}
+
+
+def test_r1_mplsvpn_rte_table():
+ tgen = get_topogen()
+
+ r1 = tgen.net.get("r1")
+ r1r = tgen.gears["r1"]
+
+ r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
+
+ # tgen.mininet_cli()
+ oid_1 = snmp_rte_oid("VRF-a", 1, "10.5.5.5", 32, 0, 1, "192.168.100.10")
+ oid_2 = snmp_rte_oid("VRF-a", 1, "10.7.7.7", 32, 0, 1, "192.168.200.10")
+ oid_3 = snmp_rte_oid("VRF-a", 1, "192.168.34.0", 24, 0, 1, "10.4.4.4")
+ oid_4 = snmp_rte_oid("VRF-a", 1, "192.168.100.0", 24, 1, 1, "192.168.100.10")
+ oid_4_a = snmp_rte_oid("VRF-a", 1, "192.168.100.0", 24, 0, 1, "192.168.100.10")
+ oid_5 = snmp_rte_oid("VRF-a", 1, "192.168.100.0", 24, 0, 0)
+ oid_5_a = snmp_rte_oid("VRF-a", 1, "192.168.100.0", 24, 1, 0)
+ oid_6 = snmp_rte_oid("VRF-a", 1, "192.168.200.0", 24, 1, 1, "192.168.200.10")
+ oid_6_a = snmp_rte_oid("VRF-a", 1, "192.168.200.0", 24, 0, 1, "192.168.200.10")
+ oid_7 = snmp_rte_oid("VRF-a", 1, "192.168.200.0", 24, 0, 0)
+ oid_7_a = snmp_rte_oid("VRF-a", 1, "192.168.200.0", 24, 1, 0)
+
+ oid_lists = [
+ [oid_1, oid_2, oid_3, oid_4, oid_5, oid_6, oid_7],
+ [oid_1, oid_2, oid_3, oid_4_a, oid_5_a, oid_6, oid_7],
+ [oid_1, oid_2, oid_3, oid_4, oid_5, oid_6_a, oid_7_a],
+ [oid_1, oid_2, oid_3, oid_4_a, oid_5_a, oid_6_a, oid_7_a],
+ [oid_1, oid_2, oid_3, oid_4, oid_5, oid_6, oid_7],
+ [oid_1, oid_2, oid_3, oid_4_a, oid_5_a, oid_6, oid_7],
+ [oid_1, oid_2, oid_3, oid_4, oid_5, oid_6_a, oid_7_a],
+ [oid_1, oid_2, oid_3, oid_4_a, oid_5_a, oid_6_a, oid_7_a],
+ ]
+
+ # check items
+
+ passed = False
+ for oid_list in oid_lists:
+ passed = True
+ for item in rte_table_test.keys():
+ print(item)
+ assertmsg = "{} should be {} oids {} full dict {}:".format(
+ item, rte_table_test[item], oid_list, r1_snmp.walk(item)
+ )
+ if not r1_snmp.test_oid_walk(item, rte_table_test[item], oid_list):
+ passed = False
+ break
+ print(
+ "{} should be {} oids {} full dict {}:".format(
+ item, rte_table_test[item], oid_list, r1_snmp.walk(item)
+ )
+ )
+ if passed:
+ break
+ print("passed {}".format(passed))
+ assert passed, assertmsg
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_blackhole_community/__init__.py b/tests/topotests/bgp_blackhole_community/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_blackhole_community/__init__.py
diff --git a/tests/topotests/bgp_blackhole_community/r1/bgpd.conf b/tests/topotests/bgp_blackhole_community/r1/bgpd.conf
new file mode 100644
index 0000000000..574d199aeb
--- /dev/null
+++ b/tests/topotests/bgp_blackhole_community/r1/bgpd.conf
@@ -0,0 +1,14 @@
+!
+router bgp 65001
+ timers bgp 3 9
+ no bgp ebgp-requires-policy
+ neighbor r1-eth0 interface remote-as external
+ address-family ipv4 unicast
+ redistribute connected
+ neighbor r1-eth0 route-map r2 out
+ exit-address-family
+ !
+!
+route-map r2 permit 10
+ set community blackhole
+!
diff --git a/tests/topotests/bgp_blackhole_community/r1/zebra.conf b/tests/topotests/bgp_blackhole_community/r1/zebra.conf
new file mode 100644
index 0000000000..70dc5e516d
--- /dev/null
+++ b/tests/topotests/bgp_blackhole_community/r1/zebra.conf
@@ -0,0 +1,10 @@
+!
+interface lo
+ ip address 172.16.255.254/32
+!
+interface r1-eth0
+ ip address 192.168.0.1/24
+!
+ip forwarding
+!
+
diff --git a/tests/topotests/bgp_blackhole_community/r2/bgpd.conf b/tests/topotests/bgp_blackhole_community/r2/bgpd.conf
new file mode 100644
index 0000000000..2260613253
--- /dev/null
+++ b/tests/topotests/bgp_blackhole_community/r2/bgpd.conf
@@ -0,0 +1,8 @@
+!
+router bgp 65002
+ no bgp ebgp-requires-policy
+ timers bgp 3 9
+ neighbor r2-eth0 interface remote-as external
+ neighbor r2-eth1 interface remote-as external
+ neighbor r2-eth2 interface remote-as internal
+!
diff --git a/tests/topotests/bgp_blackhole_community/r2/zebra.conf b/tests/topotests/bgp_blackhole_community/r2/zebra.conf
new file mode 100644
index 0000000000..cf6fb6d984
--- /dev/null
+++ b/tests/topotests/bgp_blackhole_community/r2/zebra.conf
@@ -0,0 +1,12 @@
+!
+interface r2-eth0
+ ip address 192.168.0.2/24
+!
+interface r2-eth1
+ ip address 192.168.1.1/24
+!
+interface r2-eth2
+ ip address 192.168.2.1/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_blackhole_community/r3/bgpd.conf b/tests/topotests/bgp_blackhole_community/r3/bgpd.conf
new file mode 100644
index 0000000000..d0c4b400f7
--- /dev/null
+++ b/tests/topotests/bgp_blackhole_community/r3/bgpd.conf
@@ -0,0 +1,6 @@
+!
+router bgp 65003
+ timers bgp 3 9
+ no bgp ebgp-requires-policy
+ neighbor r3-eth0 interface remote-as external
+!
diff --git a/tests/topotests/bgp_blackhole_community/r3/zebra.conf b/tests/topotests/bgp_blackhole_community/r3/zebra.conf
new file mode 100644
index 0000000000..05ab56d6f1
--- /dev/null
+++ b/tests/topotests/bgp_blackhole_community/r3/zebra.conf
@@ -0,0 +1,6 @@
+!
+interface r3-eth0
+ ip address 192.168.1.2/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_blackhole_community/r4/bgpd.conf b/tests/topotests/bgp_blackhole_community/r4/bgpd.conf
new file mode 100644
index 0000000000..0ac963e642
--- /dev/null
+++ b/tests/topotests/bgp_blackhole_community/r4/bgpd.conf
@@ -0,0 +1,6 @@
+!
+router bgp 65002
+ timers bgp 3 9
+ no bgp ebgp-requires-policy
+ neighbor r4-eth0 interface remote-as internal
+!
diff --git a/tests/topotests/bgp_blackhole_community/r4/zebra.conf b/tests/topotests/bgp_blackhole_community/r4/zebra.conf
new file mode 100644
index 0000000000..e2ccaed52a
--- /dev/null
+++ b/tests/topotests/bgp_blackhole_community/r4/zebra.conf
@@ -0,0 +1,6 @@
+!
+interface r4-eth0
+ ip address 192.168.2.2/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_blackhole_community/test_bgp_blackhole_community.py b/tests/topotests/bgp_blackhole_community/test_bgp_blackhole_community.py
new file mode 100644
index 0000000000..a856c9278f
--- /dev/null
+++ b/tests/topotests/bgp_blackhole_community/test_bgp_blackhole_community.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2021 by
+# Donatas Abraitis <donatas.abraitis@gmail.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Test if 172.16.255.254/32 tagged with BLACKHOLE community is not
+re-advertised downstream outside local AS.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from mininet.topo import Topo
+from lib.common_config import step
+
+
+class TemplateTopo(Topo):
+ def build(self, *_args, **_opts):
+ tgen = get_topogen(self)
+
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r4"])
+
+
+def setup_module(mod):
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for i, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_blackhole_community():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ def _bgp_converge():
+ output = json.loads(
+ tgen.gears["r2"].vtysh_cmd("show ip bgp 172.16.255.254/32 json")
+ )
+ expected = {"paths": [{"community": {"list": ["blackhole", "noExport"]}}]}
+ return topotest.json_cmp(output, expected)
+
+ def _bgp_no_advertise_ebgp():
+ output = json.loads(
+ tgen.gears["r2"].vtysh_cmd(
+ "show ip bgp neighbor r2-eth1 advertised-routes json"
+ )
+ )
+ expected = {
+ "advertisedRoutes": {},
+ "totalPrefixCounter": 0,
+ "filteredPrefixCounter": 0,
+ }
+
+ return topotest.json_cmp(output, expected)
+
+ def _bgp_no_advertise_ibgp():
+ output = json.loads(
+ tgen.gears["r2"].vtysh_cmd(
+ "show ip bgp neighbor r2-eth2 advertised-routes json"
+ )
+ )
+ expected = {
+ "advertisedRoutes": {"172.16.255.254/32": {}},
+ "totalPrefixCounter": 2,
+ }
+
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge)
+ success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+
+ assert result is None, 'Failed bgp convergence in "{}"'.format(tgen.gears["r2"])
+
+ step("Check if 172.16.255.254/32 is not advertised to eBGP peers")
+
+ test_func = functools.partial(_bgp_no_advertise_ebgp)
+ success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+
+ assert (
+ result is None
+ ), 'Advertised blackhole tagged prefix to eBGP peers in "{}"'.format(
+ tgen.gears["r2"]
+ )
+
+ step("Check if 172.16.255.254/32 is advertised to iBGP peers")
+
+ test_func = functools.partial(_bgp_no_advertise_ibgp)
+ success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+
+ assert (
+ result is None
+ ), 'Withdrawn blackhole tagged prefix to iBGP peers in "{}"'.format(
+ tgen.gears["r2"]
+ )
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py b/tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py
index 07623af063..d8c0cdc2fd 100644
--- a/tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py
+++ b/tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py
@@ -34,6 +34,8 @@ import pytest
import json
from functools import partial
+pytestmark = pytest.mark.pimd
+
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
@@ -97,7 +99,7 @@ class NetworkTopo(Topo):
##
#####################################################
-@pytest.mark.pim
+
def setup_module(module):
"Setup topology"
tgen = Topogen(NetworkTopo, module.__name__)
diff --git a/tests/topotests/example-test/test_template.py b/tests/topotests/example-test/test_template.py
index 4305e0199f..973303b830 100644
--- a/tests/topotests/example-test/test_template.py
+++ b/tests/topotests/example-test/test_template.py
@@ -44,6 +44,18 @@ from lib.topolog import logger
from mininet.topo import Topo
+#TODO: select markers based on daemons used during test
+# pytest module level markers
+"""
+pytestmark = pytest.mark.bfdd # single marker
+pytestmark = [
+ pytest.mark.bgpd,
+ pytest.mark.ospfd,
+ pytest.mark.ospf6d
+] # multiple markers
+"""
+
+
class TemplateTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py
index f24f463b8a..cd48716905 100755
--- a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py
+++ b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py
@@ -53,6 +53,19 @@ from lib.topolog import logger
from lib.bgp import verify_bgp_convergence
from lib.topojson import build_topo_from_json, build_config_from_json
+
+#TODO: select markers based on daemons used during test
+# pytest module level markers
+"""
+pytestmark = pytest.mark.bfdd # single marker
+pytestmark = [
+ pytest.mark.bgpd,
+ pytest.mark.ospfd,
+ pytest.mark.ospf6d
+] # multiple markers
+"""
+
+
# Reading the data from JSON File for topology and configuration creation
jsonFile = "{}/example_topojson_multiple_links.json".format(CWD)
try:
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py
index 3ae3c9f4fe..0c72e30044 100755
--- a/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py
@@ -52,6 +52,19 @@ from lib.topolog import logger
from lib.bgp import verify_bgp_convergence
from lib.topojson import build_topo_from_json, build_config_from_json
+
+#TODO: select markers based on daemons used during test
+# pytest module level markers
+"""
+pytestmark = pytest.mark.bfdd # single marker
+pytestmark = [
+ pytest.mark.bgpd,
+ pytest.mark.ospfd,
+ pytest.mark.ospf6d
+] # multiple markers
+"""
+
+
# Reading the data from JSON File for topology and configuration creation
jsonFile = "{}/example_topojson.json".format(CWD)
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py
index 06fa2f4626..d05ad6db21 100755
--- a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py
@@ -54,6 +54,19 @@ from lib.topolog import logger
from lib.bgp import verify_bgp_convergence
from lib.topojson import build_topo_from_json, build_config_from_json
+
+#TODO: select markers based on daemons used during test
+# pytest module level markers
+"""
+pytestmark = pytest.mark.bfdd # single marker
+pytestmark = [
+ pytest.mark.bgpd,
+ pytest.mark.ospfd,
+ pytest.mark.ospf6d
+] # multiple markers
+"""
+
+
# Reading the data from JSON File for topology and configuration creation
jsonFile = "{}/example_topojson.json".format(CWD)
diff --git a/tests/topotests/isis-lsp-bits-topo1/__init__.py b/tests/topotests/isis-lsp-bits-topo1/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/__init__.py
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/isisd.conf b/tests/topotests/isis-lsp-bits-topo1/rt1/isisd.conf
new file mode 100644
index 0000000000..90764a0d0f
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt1/isisd.conf
@@ -0,0 +1,27 @@
+password 1
+hostname rt1
+log file isisd.log
+!
+debug isis events
+debug isis route-events
+debug isis spf-events
+debug isis sr-events
+debug isis lsp-gen
+!
+interface lo
+ ip router isis 1
+ ipv6 router isis 1
+ isis passive
+!
+interface eth-sw1
+ ip router isis 1
+ ipv6 router isis 1
+ isis hello-multiplier 3
+ isis priority 100
+!
+router isis 1
+ net 49.0000.0000.0000.0001.00
+ is-type level-1
+ lsp-gen-interval 2
+ topology ipv6-unicast
+!
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_ip_route.ref
new file mode 100644
index 0000000000..8557f4b010
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_ip_route.ref
@@ -0,0 +1,89 @@
+{
+ "0.0.0.0\/0":[
+ {
+ "prefix":"0.0.0.0\/0",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":10,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ },
+ {
+ "fib":true,
+ "ip":"10.0.1.3",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2.2.2.2\/32":[
+ {
+ "prefix":"2.2.2.2\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "3.3.3.3\/32":[
+ {
+ "prefix":"3.3.3.3\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.3",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.1.0\/24":[
+ {
+ "prefix":"10.0.1.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1"
+ },
+ {
+ "ip":"10.0.1.3",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_ipv6_route.ref
new file mode 100644
index 0000000000..fa76533756
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_ipv6_route.ref
@@ -0,0 +1,65 @@
+{
+ "::\/0":[
+ {
+ "prefix":"::\/0",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":10,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ },
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2001:db8:1000::2\/128":[
+ {
+ "prefix":"2001:db8:1000::2\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2001:db8:1000::3\/128":[
+ {
+ "prefix":"2001:db8:1000::3\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_yang_interface_isis_adjacencies.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_yang_interface_isis_adjacencies.ref
new file mode 100644
index 0000000000..26f0dffa7a
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_yang_interface_isis_adjacencies.ref
@@ -0,0 +1,32 @@
+{
+ "frr-interface:lib": {
+ "interface": [
+ {
+ "name": "eth-sw1",
+ "vrf": "default",
+ "state": {
+ "frr-isisd:isis": {
+ "adjacencies": {
+ "adjacency": [
+ {
+ "neighbor-sys-type": "level-1",
+ "neighbor-sysid": "0000.0000.0003",
+ "hold-timer": 9,
+ "neighbor-priority": 64,
+ "state": "up"
+ },
+ {
+ "neighbor-sys-type": "level-1",
+ "neighbor-sysid": "0000.0000.0002",
+ "hold-timer": 9,
+ "neighbor-priority": 64,
+ "state": "up"
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+ }
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step2/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step2/show_ip_route.ref
new file mode 100644
index 0000000000..c826efdcfe
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step2/show_ip_route.ref
@@ -0,0 +1,82 @@
+{
+ "0.0.0.0\/0":[
+ {
+ "prefix":"0.0.0.0\/0",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":10,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.3",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2.2.2.2\/32":[
+ {
+ "prefix":"2.2.2.2\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "3.3.3.3\/32":[
+ {
+ "prefix":"3.3.3.3\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.3",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.1.0\/24":[
+ {
+ "prefix":"10.0.1.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1"
+ },
+ {
+ "ip":"10.0.1.3",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step2/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step2/show_ipv6_route.ref
new file mode 100644
index 0000000000..a386b45dad
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step2/show_ipv6_route.ref
@@ -0,0 +1,59 @@
+{
+ "::\/0":[
+ {
+ "prefix":"::\/0",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":10,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2001:db8:1000::2\/128":[
+ {
+ "prefix":"2001:db8:1000::2\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2001:db8:1000::3\/128":[
+ {
+ "prefix":"2001:db8:1000::3\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step3/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step3/show_ip_route.ref
new file mode 100644
index 0000000000..2b281b74fb
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step3/show_ip_route.ref
@@ -0,0 +1,62 @@
+{
+ "2.2.2.2\/32":[
+ {
+ "prefix":"2.2.2.2\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "3.3.3.3\/32":[
+ {
+ "prefix":"3.3.3.3\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.3",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.1.0\/24":[
+ {
+ "prefix":"10.0.1.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1"
+ },
+ {
+ "ip":"10.0.1.3",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step3/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step3/show_ipv6_route.ref
new file mode 100644
index 0000000000..4b920eda01
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step3/show_ipv6_route.ref
@@ -0,0 +1,40 @@
+{
+ "2001:db8:1000::2\/128":[
+ {
+ "prefix":"2001:db8:1000::2\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2001:db8:1000::3\/128":[
+ {
+ "prefix":"2001:db8:1000::3\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step4/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step4/show_ip_route.ref
new file mode 100644
index 0000000000..8557f4b010
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step4/show_ip_route.ref
@@ -0,0 +1,89 @@
+{
+ "0.0.0.0\/0":[
+ {
+ "prefix":"0.0.0.0\/0",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":10,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ },
+ {
+ "fib":true,
+ "ip":"10.0.1.3",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2.2.2.2\/32":[
+ {
+ "prefix":"2.2.2.2\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "3.3.3.3\/32":[
+ {
+ "prefix":"3.3.3.3\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.3",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.1.0\/24":[
+ {
+ "prefix":"10.0.1.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1"
+ },
+ {
+ "ip":"10.0.1.3",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step4/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step4/show_ipv6_route.ref
new file mode 100644
index 0000000000..fa76533756
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step4/show_ipv6_route.ref
@@ -0,0 +1,65 @@
+{
+ "::\/0":[
+ {
+ "prefix":"::\/0",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":10,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ },
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2001:db8:1000::2\/128":[
+ {
+ "prefix":"2001:db8:1000::2\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2001:db8:1000::3\/128":[
+ {
+ "prefix":"2001:db8:1000::3\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/zebra.conf b/tests/topotests/isis-lsp-bits-topo1/rt1/zebra.conf
new file mode 100644
index 0000000000..9d71d3005f
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt1/zebra.conf
@@ -0,0 +1,19 @@
+log file zebra.log
+!
+hostname rt1
+!
+debug zebra kernel
+debug zebra packet
+debug zebra mpls
+!
+interface lo
+ ip address 1.1.1.1/32
+ ipv6 address 2001:db8:1000::1/128
+!
+interface eth-sw1
+ ip address 10.0.1.1/24
+!
+ip forwarding
+!
+line vty
+!
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt2/isisd.conf b/tests/topotests/isis-lsp-bits-topo1/rt2/isisd.conf
new file mode 100644
index 0000000000..2bc4c4ad97
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt2/isisd.conf
@@ -0,0 +1,35 @@
+hostname rt2
+log file isisd.log
+!
+debug isis events
+debug isis route-events
+debug isis spf-events
+debug isis sr-events
+debug isis lsp-gen
+!
+interface lo
+ ip router isis 1
+ ipv6 router isis 1
+ isis passive
+!
+interface eth-sw1
+ ip router isis 1
+ ipv6 router isis 1
+ isis hello-multiplier 3
+!
+interface eth-rt4
+ ip router isis 2
+ ipv6 router isis 2
+ isis network point-to-point
+ isis hello-multiplier 3
+!
+router isis 1
+ net 49.0000.0000.0000.0002.00
+ lsp-gen-interval 2
+ topology ipv6-unicast
+!
+router isis 2
+ net 49.0002.0000.0000.0002.00
+ lsp-gen-interval 2
+ topology ipv6-unicast
+!
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_ip_route.ref
new file mode 100644
index 0000000000..d7e886ce86
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_ip_route.ref
@@ -0,0 +1,77 @@
+{
+ "1.1.1.1\/32":[
+ {
+ "prefix":"1.1.1.1\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.1",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "3.3.3.3\/32":[
+ {
+ "prefix":"3.3.3.3\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.3",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.1.0\/24":[
+ {
+ "prefix":"10.0.1.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.1.1",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1"
+ },
+ {
+ "ip":"10.0.1.3",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1"
+ }
+ ]
+ }
+ ],
+ "10.0.2.0\/24":[
+ {
+ "prefix":"10.0.2.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.2.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_ipv6_route.ref
new file mode 100644
index 0000000000..a92272f6d0
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_ipv6_route.ref
@@ -0,0 +1,40 @@
+{
+ "2001:db8:1000::1\/128":[
+ {
+ "prefix":"2001:db8:1000::1\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2001:db8:1000::3\/128":[
+ {
+ "prefix":"2001:db8:1000::3\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_yang_interface_isis_adjacencies.ref b/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_yang_interface_isis_adjacencies.ref
new file mode 100644
index 0000000000..c70b44e1c9
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_yang_interface_isis_adjacencies.ref
@@ -0,0 +1,58 @@
+{
+ "frr-interface:lib": {
+ "interface": [
+ {
+ "name": "eth-rt4",
+ "vrf": "default",
+ "state": {
+ "frr-isisd:isis": {
+ "adjacencies": {
+ "adjacency": [
+ {
+ "neighbor-sys-type": "level-1-2",
+ "neighbor-sysid": "0000.0000.0004",
+ "hold-timer": 9,
+ "neighbor-priority": 0,
+ "state": "up"
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "name": "eth-sw1",
+ "vrf": "default",
+ "state": {
+ "frr-isisd:isis": {
+ "adjacencies": {
+ "adjacency": [
+ {
+ "neighbor-sys-type": "level-1",
+ "neighbor-sysid": "0000.0000.0001",
+ "hold-timer": 9,
+ "neighbor-priority": 100,
+ "state": "up"
+ },
+ {
+ "neighbor-sys-type": "level-1",
+ "neighbor-sysid": "0000.0000.0003",
+ "hold-timer": 9,
+ "neighbor-priority": 64,
+ "state": "up"
+ },
+ {
+ "neighbor-sys-type": "level-2",
+ "neighbor-sysid": "0000.0000.0003",
+ "hold-timer": 9,
+ "neighbor-priority": 64,
+ "state": "up"
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+ }
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt2/zebra.conf b/tests/topotests/isis-lsp-bits-topo1/rt2/zebra.conf
new file mode 100644
index 0000000000..234e10efa9
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt2/zebra.conf
@@ -0,0 +1,22 @@
+log file zebra.log
+!
+hostname rt2
+!
+debug zebra kernel
+debug zebra packet
+debug zebra mpls
+!
+interface lo
+ ip address 2.2.2.2/32
+ ipv6 address 2001:db8:1000::2/128
+!
+interface eth-sw1
+ ip address 10.0.1.2/24
+!
+interface eth-rt4
+ ip address 10.0.2.2/24
+!
+ip forwarding
+!
+line vty
+!
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt3/isisd.conf b/tests/topotests/isis-lsp-bits-topo1/rt3/isisd.conf
new file mode 100644
index 0000000000..9ad97109b5
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt3/isisd.conf
@@ -0,0 +1,35 @@
+hostname rt3
+log file isisd.log
+!
+debug isis events
+debug isis route-events
+debug isis spf-events
+debug isis sr-events
+debug isis lsp-gen
+!
+interface lo
+ ip router isis 1
+ ipv6 router isis 1
+ isis passive
+!
+interface eth-sw1
+ ip router isis 1
+ ipv6 router isis 1
+ isis hello-multiplier 3
+!
+interface eth-rt5
+ ip router isis 2
+ ipv6 router isis 2
+ isis network point-to-point
+ isis hello-multiplier 3
+!
+router isis 1
+ net 49.0000.0000.0000.0003.00
+ lsp-gen-interval 2
+ topology ipv6-unicast
+!
+router isis 2
+ net 49.0002.0000.0000.0003.00
+ lsp-gen-interval 2
+ topology ipv6-unicast
+!
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_ip_route.ref
new file mode 100644
index 0000000000..55f0aedef5
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_ip_route.ref
@@ -0,0 +1,97 @@
+{
+ "1.1.1.1\/32":[
+ {
+ "prefix":"1.1.1.1\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.1",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2.2.2.2\/32":[
+ {
+ "prefix":"2.2.2.2\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "5.5.5.5\/32":[
+ {
+ "prefix":"5.5.5.5\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.4.5",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt5",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.1.0\/24":[
+ {
+ "prefix":"10.0.1.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.1.1",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1"
+ },
+ {
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-sw1"
+ }
+ ]
+ }
+ ],
+ "10.0.4.0\/24":[
+ {
+ "prefix":"10.0.4.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.4.5",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt5"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_ipv6_route.ref
new file mode 100644
index 0000000000..5d6dfca76a
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_ipv6_route.ref
@@ -0,0 +1,59 @@
+{
+ "2001:db8:1000::1\/128":[
+ {
+ "prefix":"2001:db8:1000::1\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2001:db8:1000::2\/128":[
+ {
+ "prefix":"2001:db8:1000::2\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-sw1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2001:db8:1000::5\/128":[
+ {
+ "prefix":"2001:db8:1000::5\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt5",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_yang_interface_isis_adjacencies.ref b/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_yang_interface_isis_adjacencies.ref
new file mode 100644
index 0000000000..6950086b1e
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_yang_interface_isis_adjacencies.ref
@@ -0,0 +1,51 @@
+{
+ "frr-interface:lib": {
+ "interface": [
+ {
+ "name": "eth-rt5",
+ "vrf": "default",
+ "state": {
+ "frr-isisd:isis": {
+ "adjacencies": {
+ "adjacency": [
+ {
+ "neighbor-sys-type": "level-1-2",
+ "neighbor-sysid": "0000.0000.0005",
+ "hold-timer": 9,
+ "neighbor-priority": 0,
+ "state": "up"
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "name": "eth-sw1",
+ "vrf": "default",
+ "state": {
+ "frr-isisd:isis": {
+ "adjacencies": {
+ "adjacency": [
+ {
+ "neighbor-sys-type": "level-1",
+ "neighbor-sysid": "0000.0000.0001",
+ "hold-timer": 9,
+ "neighbor-priority": 100,
+ "state": "up"
+ },
+ {
+ "neighbor-sys-type": "level-1",
+ "neighbor-sysid": "0000.0000.0002",
+ "hold-timer": 9,
+ "neighbor-priority": 64,
+ "state": "up"
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+ }
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt3/zebra.conf b/tests/topotests/isis-lsp-bits-topo1/rt3/zebra.conf
new file mode 100644
index 0000000000..9a0defd62b
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt3/zebra.conf
@@ -0,0 +1,22 @@
+log file zebra.log
+!
+hostname rt3
+!
+debug zebra kernel
+debug zebra packet
+debug zebra mpls
+!
+interface lo
+ ip address 3.3.3.3/32
+ ipv6 address 2001:db8:1000::3/128
+!
+interface eth-sw1
+ ip address 10.0.1.3/24
+!
+interface eth-rt5
+ ip address 10.0.4.3/24
+!
+ip forwarding
+!
+line vty
+!
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt4/isisd.conf b/tests/topotests/isis-lsp-bits-topo1/rt4/isisd.conf
new file mode 100644
index 0000000000..e85412a71d
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt4/isisd.conf
@@ -0,0 +1,42 @@
+hostname rt4
+log file isisd.log
+!
+debug isis events
+debug isis route-events
+debug isis spf-events
+debug isis sr-events
+debug isis lsp-gen
+!
+interface lo
+ ip router isis 4
+ ipv6 router isis 4
+ isis passive
+!
+interface eth-rt2
+ ip router isis 2
+ ipv6 router isis 2
+ isis network point-to-point
+ isis hello-multiplier 3
+!
+interface eth-rt5
+ ip router isis 4
+ ipv6 router isis 4
+ isis network point-to-point
+ isis hello-multiplier 3
+!
+interface eth-rt6
+ ip router isis 4
+ ipv6 router isis 4
+ isis network point-to-point
+ isis hello-multiplier 3
+!
+router isis 2
+ net 49.0002.0000.0000.0004.00
+ lsp-gen-interval 2
+ topology ipv6-unicast
+!
+router isis 4
+ net 49.0004.0000.0000.0004.00
+ lsp-gen-interval 2
+ topology ipv6-unicast
+!
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_ip_route.ref
new file mode 100644
index 0000000000..2cf5c40635
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_ip_route.ref
@@ -0,0 +1,94 @@
+{
+ "6.6.6.6\/32":[
+ {
+ "prefix":"6.6.6.6\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.7.6",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt6",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.2.0\/24":[
+ {
+ "prefix":"10.0.2.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.2.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "10.0.6.0\/24":[
+ {
+ "prefix":"10.0.6.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.6.5",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt5"
+ }
+ ]
+ }
+ ],
+ "10.0.7.0\/24":[
+ {
+ "prefix":"10.0.7.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.7.6",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt6"
+ }
+ ]
+ }
+ ],
+ "10.0.8.0\/24":[
+ {
+ "prefix":"10.0.8.0\/24",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.6.5",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt5",
+ "active":true
+ },
+ {
+ "fib":true,
+ "ip":"10.0.7.6",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt6",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_ipv6_route.ref
new file mode 100644
index 0000000000..cde7287943
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_ipv6_route.ref
@@ -0,0 +1,21 @@
+{
+ "2001:db8:1000::6\/128":[
+ {
+ "prefix":"2001:db8:1000::6\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt6",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_yang_interface_isis_adjacencies.ref b/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_yang_interface_isis_adjacencies.ref
new file mode 100644
index 0000000000..233180ceb8
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_yang_interface_isis_adjacencies.ref
@@ -0,0 +1,63 @@
+{
+ "frr-interface:lib": {
+ "interface": [
+ {
+ "name": "eth-rt2",
+ "vrf": "default",
+ "state": {
+ "frr-isisd:isis": {
+ "adjacencies": {
+ "adjacency": [
+ {
+ "neighbor-sys-type": "level-1-2",
+ "neighbor-sysid": "0000.0000.0002",
+ "hold-timer": 9,
+ "neighbor-priority": 0,
+ "state": "up"
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "name": "eth-rt5",
+ "vrf": "default",
+ "state": {
+ "frr-isisd:isis": {
+ "adjacencies": {
+ "adjacency": [
+ {
+ "neighbor-sys-type": "level-1-2",
+ "neighbor-sysid": "0000.0000.0005",
+ "hold-timer": 9,
+ "neighbor-priority": 0,
+ "state": "up"
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "name": "eth-rt6",
+ "vrf": "default",
+ "state": {
+ "frr-isisd:isis": {
+ "adjacencies": {
+ "adjacency": [
+ {
+ "neighbor-sys-type": "level-1",
+ "neighbor-sysid": "0000.0000.0006",
+ "hold-timer": 9,
+ "neighbor-priority": 0,
+ "state": "up"
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+ }
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt4/zebra.conf b/tests/topotests/isis-lsp-bits-topo1/rt4/zebra.conf
new file mode 100644
index 0000000000..adcf433249
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt4/zebra.conf
@@ -0,0 +1,25 @@
+log file zebra.log
+!
+hostname rt4
+!
+debug zebra kernel
+debug zebra packet
+debug zebra mpls
+!
+interface lo
+ ip address 4.4.4.4/32
+ ipv6 address 2001:db8:1000::4/128
+!
+interface eth-rt2
+ ip address 10.0.2.4/24
+!
+interface eth-rt5
+ ip address 10.0.6.4/24
+!
+interface eth-rt6
+ ip address 10.0.7.4/24
+!
+ip forwarding
+!
+line vty
+!
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt5/isisd.conf b/tests/topotests/isis-lsp-bits-topo1/rt5/isisd.conf
new file mode 100644
index 0000000000..2cab0c88fc
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt5/isisd.conf
@@ -0,0 +1,42 @@
+hostname rt5
+log file isisd.log
+!
+debug isis events
+debug isis route-events
+debug isis spf-events
+debug isis sr-events
+debug isis lsp-gen
+!
+interface lo
+ ip router isis 2
+ ipv6 router isis 2
+ isis passive
+!
+interface eth-rt3
+ ip router isis 2
+ ipv6 router isis 2
+ isis network point-to-point
+ isis hello-multiplier 3
+!
+interface eth-rt4
+ ip router isis 4
+ ipv6 router isis 4
+ isis network point-to-point
+ isis hello-multiplier 3
+!
+interface eth-rt6
+ ip router isis 4
+ ipv6 router isis 4
+ isis network point-to-point
+ isis hello-multiplier 3
+!
+router isis 2
+ net 49.0002.0000.0000.0005.00
+ lsp-gen-interval 2
+ topology ipv6-unicast
+!
+router isis 4
+ net 49.0004.0000.0000.0005.00
+ lsp-gen-interval 2
+ topology ipv6-unicast
+!
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_ip_route.ref
new file mode 100644
index 0000000000..fe34b03890
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_ip_route.ref
@@ -0,0 +1,114 @@
+{
+ "4.4.4.4\/32":[
+ {
+ "prefix":"4.4.4.4\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.6.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "6.6.6.6\/32":[
+ {
+ "prefix":"6.6.6.6\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.8.6",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt6",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.4.0\/24":[
+ {
+ "prefix":"10.0.4.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.4.3",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "10.0.6.0\/24":[
+ {
+ "prefix":"10.0.6.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.6.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4"
+ }
+ ]
+ }
+ ],
+ "10.0.7.0\/24":[
+ {
+ "prefix":"10.0.7.0\/24",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.6.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true
+ },
+ {
+ "fib":true,
+ "ip":"10.0.8.6",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt6",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.8.0\/24":[
+ {
+ "prefix":"10.0.8.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.8.6",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt6"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_ipv6_route.ref
new file mode 100644
index 0000000000..7586c73852
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_ipv6_route.ref
@@ -0,0 +1,40 @@
+{
+ "2001:db8:1000::4\/128":[
+ {
+ "prefix":"2001:db8:1000::4\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt4",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2001:db8:1000::6\/128":[
+ {
+ "prefix":"2001:db8:1000::6\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt6",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_yang_interface_isis_adjacencies.ref b/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_yang_interface_isis_adjacencies.ref
new file mode 100644
index 0000000000..f939a6abff
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_yang_interface_isis_adjacencies.ref
@@ -0,0 +1,63 @@
+{
+ "frr-interface:lib": {
+ "interface": [
+ {
+ "name": "eth-rt3",
+ "vrf": "default",
+ "state": {
+ "frr-isisd:isis": {
+ "adjacencies": {
+ "adjacency": [
+ {
+ "neighbor-sys-type": "level-1-2",
+ "neighbor-sysid": "0000.0000.0003",
+ "hold-timer": 9,
+ "neighbor-priority": 0,
+ "state": "up"
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "name": "eth-rt4",
+ "vrf": "default",
+ "state": {
+ "frr-isisd:isis": {
+ "adjacencies": {
+ "adjacency": [
+ {
+ "neighbor-sys-type": "level-1-2",
+ "neighbor-sysid": "0000.0000.0004",
+ "hold-timer": 9,
+ "neighbor-priority": 0,
+ "state": "up"
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "name": "eth-rt6",
+ "vrf": "default",
+ "state": {
+ "frr-isisd:isis": {
+ "adjacencies": {
+ "adjacency": [
+ {
+ "neighbor-sys-type": "level-1",
+ "neighbor-sysid": "0000.0000.0006",
+ "hold-timer": 9,
+ "neighbor-priority": 0,
+ "state": "up"
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+ }
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt5/zebra.conf b/tests/topotests/isis-lsp-bits-topo1/rt5/zebra.conf
new file mode 100644
index 0000000000..0f10ce921f
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt5/zebra.conf
@@ -0,0 +1,25 @@
+log file zebra.log
+!
+hostname rt5
+!
+debug zebra kernel
+debug zebra packet
+debug zebra mpls
+!
+interface lo
+ ip address 5.5.5.5/32
+ ipv6 address 2001:db8:1000::5/128
+!
+interface eth-rt3
+ ip address 10.0.4.5/24
+!
+interface eth-rt4
+ ip address 10.0.6.5/24
+!
+interface eth-rt6
+ ip address 10.0.8.5/24
+!
+ip forwarding
+!
+line vty
+!
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/isisd.conf b/tests/topotests/isis-lsp-bits-topo1/rt6/isisd.conf
new file mode 100644
index 0000000000..249f945e0c
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt6/isisd.conf
@@ -0,0 +1,32 @@
+hostname rt6
+log file isisd.log
+!
+debug isis events
+debug isis route-events
+debug isis spf-events
+debug isis sr-events
+debug isis lsp-gen
+!
+interface lo
+ ip router isis 4
+ ipv6 router isis 4
+ isis passive
+!
+interface eth-rt4
+ ip router isis 4
+ ipv6 router isis 4
+ isis network point-to-point
+ isis hello-multiplier 3
+!
+interface eth-rt5
+ ip router isis 4
+ ipv6 router isis 4
+ isis network point-to-point
+ isis hello-multiplier 3
+!
+router isis 4
+ net 49.0004.0000.0000.0006.00
+ is-type level-1
+ lsp-gen-interval 2
+ topology ipv6-unicast
+!
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_ip_route.ref
new file mode 100644
index 0000000000..2840514e6e
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_ip_route.ref
@@ -0,0 +1,106 @@
+{
+ "0.0.0.0\/0":[
+ {
+ "prefix":"0.0.0.0\/0",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":10,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true
+ },
+ {
+ "fib":true,
+ "ip":"10.0.8.5",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt5",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "4.4.4.4\/32":[
+ {
+ "prefix":"4.4.4.4\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.6.0\/24":[
+ {
+ "prefix":"10.0.6.0\/24",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true
+ },
+ {
+ "fib":true,
+ "ip":"10.0.8.5",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt5",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.7.0\/24":[
+ {
+ "prefix":"10.0.7.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4"
+ }
+ ]
+ }
+ ],
+ "10.0.8.0\/24":[
+ {
+ "prefix":"10.0.8.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.8.5",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt5"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_ipv6_route.ref
new file mode 100644
index 0000000000..278129f481
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_ipv6_route.ref
@@ -0,0 +1,46 @@
+{
+ "::\/0":[
+ {
+ "prefix":"::\/0",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":10,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt5",
+ "active":true
+ },
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt4",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2001:db8:1000::4\/128":[
+ {
+ "prefix":"2001:db8:1000::4\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt4",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_yang_interface_isis_adjacencies.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_yang_interface_isis_adjacencies.ref
new file mode 100644
index 0000000000..b4e8c23189
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_yang_interface_isis_adjacencies.ref
@@ -0,0 +1,44 @@
+{
+ "frr-interface:lib": {
+ "interface": [
+ {
+ "name": "eth-rt4",
+ "vrf": "default",
+ "state": {
+ "frr-isisd:isis": {
+ "adjacencies": {
+ "adjacency": [
+ {
+ "neighbor-sys-type": "level-1-2",
+ "neighbor-sysid": "0000.0000.0004",
+ "hold-timer": 9,
+ "neighbor-priority": 0,
+ "state": "up"
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "name": "eth-rt5",
+ "vrf": "default",
+ "state": {
+ "frr-isisd:isis": {
+ "adjacencies": {
+ "adjacency": [
+ {
+ "neighbor-sys-type": "level-1-2",
+ "neighbor-sysid": "0000.0000.0005",
+ "hold-timer": 9,
+ "neighbor-priority": 0,
+ "state": "up"
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+ }
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step2/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step2/show_ip_route.ref
new file mode 100644
index 0000000000..61efcb9e98
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step2/show_ip_route.ref
@@ -0,0 +1,99 @@
+{
+ "0.0.0.0\/0":[
+ {
+ "prefix":"0.0.0.0\/0",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":10,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.8.5",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt5",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "4.4.4.4\/32":[
+ {
+ "prefix":"4.4.4.4\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.6.0\/24":[
+ {
+ "prefix":"10.0.6.0\/24",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true
+ },
+ {
+ "fib":true,
+ "ip":"10.0.8.5",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt5",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.7.0\/24":[
+ {
+ "prefix":"10.0.7.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4"
+ }
+ ]
+ }
+ ],
+ "10.0.8.0\/24":[
+ {
+ "prefix":"10.0.8.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.8.5",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt5"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step2/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step2/show_ipv6_route.ref
new file mode 100644
index 0000000000..2e982e0c37
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step2/show_ipv6_route.ref
@@ -0,0 +1,40 @@
+{
+ "::\/0":[
+ {
+ "prefix":"::\/0",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":10,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt5",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2001:db8:1000::4\/128":[
+ {
+ "prefix":"2001:db8:1000::4\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt4",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step3/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step3/show_ip_route.ref
new file mode 100644
index 0000000000..8fecf14687
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step3/show_ip_route.ref
@@ -0,0 +1,80 @@
+{
+ "4.4.4.4\/32":[
+ {
+ "prefix":"4.4.4.4\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.6.0\/24":[
+ {
+ "prefix":"10.0.6.0\/24",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true
+ },
+ {
+ "fib":true,
+ "ip":"10.0.8.5",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt5",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.7.0\/24":[
+ {
+ "prefix":"10.0.7.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4"
+ }
+ ]
+ }
+ ],
+ "10.0.8.0\/24":[
+ {
+ "prefix":"10.0.8.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.8.5",
+ "afi":"ipv4",
+ "interfaceIndex":3,
+ "interfaceName":"eth-rt5"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step3/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step3/show_ipv6_route.ref
new file mode 100644
index 0000000000..9b53a1d760
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step3/show_ipv6_route.ref
@@ -0,0 +1,21 @@
+{
+ "2001:db8:1000::4\/128":[
+ {
+ "prefix":"2001:db8:1000::4\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt4",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step4/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step4/show_ip_route.ref
new file mode 100644
index 0000000000..2840514e6e
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step4/show_ip_route.ref
@@ -0,0 +1,107 @@
+{
+ "0.0.0.0\/0":[
+ {
+ "prefix":"0.0.0.0\/0",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":10,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true
+ },
+ {
+ "fib":true,
+ "ip":"10.0.8.5",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt5",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "4.4.4.4\/32":[
+ {
+ "prefix":"4.4.4.4\/32",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.6.0\/24":[
+ {
+ "prefix":"10.0.6.0\/24",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true
+ },
+ {
+ "fib":true,
+ "ip":"10.0.8.5",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt5",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.7.0\/24":[
+ {
+ "prefix":"10.0.7.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4"
+ }
+ ]
+ }
+ ],
+ "10.0.8.0\/24":[
+ {
+ "prefix":"10.0.8.0\/24",
+ "protocol":"isis",
+ "distance":115,
+ "metric":20,
+ "nexthops":[
+ {
+ "ip":"10.0.8.5",
+ "afi":"ipv4",
+ "interfaceIndex":3,
+ "interfaceName":"eth-rt5"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step4/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step4/show_ipv6_route.ref
new file mode 100644
index 0000000000..278129f481
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step4/show_ipv6_route.ref
@@ -0,0 +1,46 @@
+{
+ "::\/0":[
+ {
+ "prefix":"::\/0",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":10,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt5",
+ "active":true
+ },
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt4",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "2001:db8:1000::4\/128":[
+ {
+ "prefix":"2001:db8:1000::4\/128",
+ "protocol":"isis",
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+ "metric":20,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt4",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/zebra.conf b/tests/topotests/isis-lsp-bits-topo1/rt6/zebra.conf
new file mode 100644
index 0000000000..6084010a93
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/rt6/zebra.conf
@@ -0,0 +1,22 @@
+log file zebra.log
+!
+hostname rt6
+!
+debug zebra kernel
+debug zebra packet
+debug zebra mpls
+!
+interface lo
+ ip address 6.6.6.6/32
+ ipv6 address 2001:db8:1000::6/128
+!
+interface eth-rt4
+ ip address 10.0.7.6/24
+!
+interface eth-rt5
+ ip address 10.0.8.6/24
+!
+ip forwarding
+!
+line vty
+!
diff --git a/tests/topotests/isis-lsp-bits-topo1/test_isis_lsp_bits_topo1.py b/tests/topotests/isis-lsp-bits-topo1/test_isis_lsp_bits_topo1.py
new file mode 100755
index 0000000000..95a0d87c33
--- /dev/null
+++ b/tests/topotests/isis-lsp-bits-topo1/test_isis_lsp_bits_topo1.py
@@ -0,0 +1,353 @@
+#!/usr/bin/env python
+
+#
+# test_isis_lsp_bits_topo1.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2021 by Volta Networks
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_isis_lsp_bits_topo1.py:
+
+ +---------+
+ | |
+ | RT1 |
+ | 1.1.1.1 |
+ | L1 |
+ +---------+
+ |eth-sw1
+ |
+ |
+ |
+ +---------+ | +---------+
+ | | | | |
+ | RT2 |eth-sw1 | eth-sw1| RT3 |
+ | 2.2.2.2 +----------+----------+ 3.3.3.3 |
+ | L1|L2 | 10.0.1.0/24 | L1|L2 |
+ +---------+ +---------+
+ eth-rt4| eth-rt5|
+ | |
+ 10.0.2.0/24| |10.0.4.0/24
+ | |
+ eth-rt2| eth-rt3|
+ +---------+ +---------+
+ | | | |
+ | RT4 | 10.0.6.0/24 | RT5 |
+ | 4.4.4.4 +---------------------+ 5.5.5.5 |
+ | L1|L2 |eth-rt5 eth-rt4| L1|L2 |
+ +---------+ +---------+
+ eth-rt6| |eth-rt6
+ | |
+ 10.0.7.0/24| |10.0.8.0/24
+ | +---------+ |
+ | | | |
+ | | RT6 | |
+ +----------+ 6.6.6.6 +-----------+
+ eth-rt4| L1 |eth-rt5
+ +---------+
+"""
+
+import os
+import sys
+import pytest
+import json
+import re
+import tempfile
+from time import sleep
+from functools import partial
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+# Global multi-dimensional dictionary containing all expected outputs
+outputs = {}
+
+class TemplateTopo(Topo):
+ "Test topology builder"
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ #
+ # Define FRR Routers
+ #
+ for router in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
+ tgen.add_router(router)
+
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch('s1')
+ switch.add_link(tgen.gears['rt1'], nodeif="eth-sw1")
+ switch.add_link(tgen.gears['rt2'], nodeif="eth-sw1")
+ switch.add_link(tgen.gears['rt3'], nodeif="eth-sw1")
+
+ switch = tgen.add_switch('s2')
+ switch.add_link(tgen.gears['rt2'], nodeif="eth-rt4")
+ switch.add_link(tgen.gears['rt4'], nodeif="eth-rt2")
+
+ switch = tgen.add_switch('s4')
+ switch.add_link(tgen.gears['rt3'], nodeif="eth-rt5")
+ switch.add_link(tgen.gears['rt5'], nodeif="eth-rt3")
+
+ switch = tgen.add_switch('s6')
+ switch.add_link(tgen.gears['rt4'], nodeif="eth-rt5")
+ switch.add_link(tgen.gears['rt5'], nodeif="eth-rt4")
+
+ switch = tgen.add_switch('s7')
+ switch.add_link(tgen.gears['rt4'], nodeif="eth-rt6")
+ switch.add_link(tgen.gears['rt6'], nodeif="eth-rt4")
+
+ switch = tgen.add_switch('s8')
+ switch.add_link(tgen.gears['rt5'], nodeif="eth-rt6")
+ switch.add_link(tgen.gears['rt6'], nodeif="eth-rt5")
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ # For all registered routers, load the zebra configuration file
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA,
+ os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_ISIS,
+ os.path.join(CWD, '{}/isisd.conf'.format(rname))
+ )
+
+ tgen.start_router()
+
+def teardown_module(mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ # This function tears down the whole topology.
+ tgen.stop_topology()
+
+def router_compare_json_output(rname, command, reference):
+ "Compare router JSON output"
+
+ logger.info('Comparing router "%s" "%s" output', rname, command)
+
+ tgen = get_topogen()
+ filename = "{}/{}/{}".format(CWD, rname, reference)
+ expected = json.loads(open(filename).read())
+
+ # Run test function until we get a result. Wait at most 60 seconds.
+ test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected)
+ _, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5)
+ assertmsg = '"{}" JSON output mismatches the expected result'.format(rname)
+ assert diff is None, assertmsg
+
+#
+# Step 1
+#
+# Test initial network convergence
+# Attach-bit defaults to on, so expect default route pointing to L1|L2 router
+#
+def test_isis_adjacencies_step1():
+ logger.info("Test (step 1): check IS-IS adjacencies")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
+ router_compare_json_output(
+ rname,
+ "show yang operational-data /frr-interface:lib isisd",
+ "step1/show_yang_interface_isis_adjacencies.ref",
+ )
+
+def test_rib_ipv4_step1():
+ logger.info("Test (step 1): verify IPv4 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
+ router_compare_json_output(
+ rname, "show ip route isis json", "step1/show_ip_route.ref"
+ )
+
+def test_rib_ipv6_step1():
+ logger.info("Test (step 1): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
+ router_compare_json_output(
+ rname, "show ipv6 route isis json", "step1/show_ipv6_route.ref"
+ )
+
+#
+# Step 2
+#
+# Action(s):
+# -Disable sending Attach bit on RT2 and RT4
+#
+# Expected changes:
+# -RT1 should remove the default route pointing to RT2
+# -RT6 should remove the default route pointing to RT4
+#
+def test_rib_ipv4_step2():
+ logger.info("Test (step 2): verify IPv4 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info('Disabling setting the attached-bit on RT2 and RT4')
+ tgen.net['rt2'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no attached-bit send"')
+ tgen.net['rt4'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no attached-bit send"')
+
+ for rname in ['rt1', 'rt6']:
+ router_compare_json_output(
+ rname, "show ip route isis json", "step2/show_ip_route.ref"
+ )
+
+def test_rib_ipv6_step2():
+ logger.info("Test (step 2): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ['rt1', 'rt6']:
+ router_compare_json_output(
+ rname, "show ipv6 route isis json", "step2/show_ipv6_route.ref"
+ )
+
+#
+# Step 3
+#
+# Action(s):
+# -restore attach-bit, enable sending attach-bit
+# -disable processing an LSP with the attach bit set
+#
+# Expected changes:
+# -RT1 and RT6 should not install a default route
+#
+def test_rib_ipv4_step3():
+ logger.info("Test (step 3): verify IPv4 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info('Enable setting the attached-bit on RT2 and RT4')
+ tgen.net['rt2'].cmd('vtysh -c "conf t" -c "router isis 1" -c "attached-bit send"')
+ tgen.net['rt4'].cmd('vtysh -c "conf t" -c "router isis 1" -c "attached-bit send"')
+
+ logger.info('Disable processing received attached-bit in LSP on RT1 and RT6')
+ tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "attached-bit receive ignore"')
+ tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "attached-bit receive ignore"')
+
+ for rname in ['rt1', 'rt6']:
+ router_compare_json_output(
+ rname, "show ip route isis json", "step3/show_ip_route.ref"
+ )
+
+def test_rib_ipv6_step3():
+ logger.info("Test (step 3): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ['rt1', 'rt6']:
+ router_compare_json_output(
+ rname, "show ipv6 route isis json", "step3/show_ipv6_route.ref"
+ )
+
+#
+# Step 4
+#
+# Action(s):
+# -restore back to default attach-bit config
+#
+# Expected changes:
+# -RT1 and RT6 should add default route
+# -no changes on other routers
+#
+def test_rib_ipv4_step4():
+ logger.info("Test (step 4): verify IPv4 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info('restore default processing on received attached-bit in LSP on RT1 and RT6')
+ tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no attached-bit receive ignore"')
+ tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no attached-bit receive ignore"')
+
+ for rname in ['rt1', 'rt6']:
+ router_compare_json_output(
+ rname, "show ip route isis json", "step4/show_ip_route.ref")
+
+def test_rib_ipv6_step4():
+ logger.info("Test (step 4): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ['rt1', 'rt6']:
+ router_compare_json_output(
+ rname, "show ipv6 route isis json", "step4/show_ipv6_route.ref")
+
+# Memory leak test template
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip('Memory leak test/report is disabled')
+
+ tgen.report_memory_leaks()
+
+if __name__ == '__main__':
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index 175d660d1e..3f360ef40a 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -257,6 +257,7 @@ def create_common_configuration(
"bgp": "! BGP Config\n",
"vrf": "! VRF Config\n",
"ospf": "! OSPF Config\n",
+ "ospf6": "! OSPF Config\n",
"pim": "! PIM Config\n",
}
)
@@ -752,6 +753,12 @@ def start_topology(tgen, daemon=None):
TopoRouter.RD_OSPF, "{}/{}/ospfd.conf".format(TMPDIR, rname)
)
+ if daemon and "ospf6d" in daemon:
+ # Loading empty ospf6d.conf file to router, to start the ospf6 daemon
+ router.load_config(
+ TopoRouter.RD_OSPF6, "{}/{}/ospf6d.conf".format(TMPDIR, rname)
+ )
+
if daemon and "pimd" in daemon:
# Loading empty pimd.conf file to router, to start the pim deamon
router.load_config(
@@ -845,6 +852,9 @@ def topo_daemons(tgen, topo):
if "ospf" in topo["routers"][rtr] and "ospfd" not in daemon_list:
daemon_list.append("ospfd")
+ if "ospf6" in topo["routers"][rtr] and "ospf6d" not in daemon_list:
+ daemon_list.append("ospf6d")
+
for val in topo["routers"][rtr]["links"].values():
if "pim" in val and "pimd" not in daemon_list:
daemon_list.append("pimd")
@@ -1594,6 +1604,25 @@ def create_interfaces_cfg(tgen, topo, build=False):
-------
True or False
"""
+
+ def _create_interfaces_ospf_cfg(ospf, c_data, data, ospf_keywords):
+ interface_data = []
+ ip_ospf = "ipv6 ospf6" if ospf == "ospf6" else "ip ospf"
+ for keyword in ospf_keywords:
+ if keyword in data[ospf]:
+ intf_ospf_value = c_data["links"][destRouterLink][ospf][keyword]
+ if "delete" in data and data["delete"]:
+ interface_data.append(
+ "no {} {}".format(ip_ospf, keyword.replace("_", "-"))
+ )
+ else:
+ interface_data.append(
+ "{} {} {}".format(
+ ip_ospf, keyword.replace("_", "-"), intf_ospf_value
+ )
+ )
+ return interface_data
+
result = False
topo = deepcopy(topo)
@@ -1640,66 +1669,26 @@ def create_interfaces_cfg(tgen, topo, build=False):
else:
interface_data.append("ipv6 address {}\n".format(intf_addr))
+ ospf_keywords = [
+ "hello_interval",
+ "dead_interval",
+ "network",
+ "priority",
+ "cost",
+ ]
if "ospf" in data:
- ospf_data = data["ospf"]
- if "area" in ospf_data:
- intf_ospf_area = c_data["links"][destRouterLink]["ospf"]["area"]
- if "delete" in data and data["delete"]:
- interface_data.append("no ip ospf area")
- else:
- interface_data.append(
- "ip ospf area {}".format(intf_ospf_area)
- )
-
- if "hello_interval" in ospf_data:
- intf_ospf_hello = c_data["links"][destRouterLink]["ospf"][
- "hello_interval"
- ]
- if "delete" in data and data["delete"]:
- interface_data.append("no ip ospf " " hello-interval")
- else:
- interface_data.append(
- "ip ospf" " hello-interval {}".format(intf_ospf_hello)
- )
-
- if "dead_interval" in ospf_data:
- intf_ospf_dead = c_data["links"][destRouterLink]["ospf"][
- "dead_interval"
- ]
- if "delete" in data and data["delete"]:
- interface_data.append("no ip ospf" " dead-interval")
- else:
- interface_data.append(
- "ip ospf" " dead-interval {}".format(intf_ospf_dead)
- )
-
- if "network" in ospf_data:
- intf_ospf_nw = c_data["links"][destRouterLink]["ospf"][
- "network"
- ]
- if "delete" in data and data["delete"]:
- interface_data.append(
- "no ip ospf" " network {}".format(intf_ospf_nw)
- )
- else:
- interface_data.append(
- "ip ospf" " network {}".format(intf_ospf_nw)
- )
-
- if "priority" in ospf_data:
- intf_ospf_nw = c_data["links"][destRouterLink]["ospf"][
- "priority"
- ]
+ interface_data += _create_interfaces_ospf_cfg(
+ "ospf", c_data, data, ospf_keywords + ["area"]
+ )
+ if "ospf6" in data:
+ interface_data += _create_interfaces_ospf_cfg(
+ "ospf6", c_data, data, ospf_keywords
+ )
- if "delete" in data and data["delete"]:
- interface_data.append("no ip ospf" " priority")
- else:
- interface_data.append(
- "ip ospf" " priority {}".format(intf_ospf_nw)
- )
result = create_common_configuration(
tgen, c_router, interface_data, "interface_config", build=build
)
+
except InvalidCLIError:
# Traceback
errormsg = traceback.format_exc()
@@ -4395,4 +4384,3 @@ def verify_ip_nht(tgen, input_dict):
logger.debug("Exiting lib API: verify_ip_nht()")
return False
-
diff --git a/tests/topotests/lib/ospf.py b/tests/topotests/lib/ospf.py
index 3e368cd7d3..5bc9f14fea 100644
--- a/tests/topotests/lib/ospf.py
+++ b/tests/topotests/lib/ospf.py
@@ -94,7 +94,7 @@ def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=Tru
return result
-def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True):
+def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True, ospf="ospf"):
"""
Helper API to create ospf global configuration.
@@ -105,6 +105,33 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True
* `router` : router to be configured.
* `build` : Only for initial setup phase this is set as True.
* `load_config` : Loading the config to router this is set as True.
+ * `ospf` : either 'ospf' or 'ospf6'
+
+ Usage
+ -----
+ input_dict = {
+ "routers": {
+ "r1": {
+ "links": {
+ "r3": {
+ "ipv6": "2013:13::1/64",
+ "ospf6": {
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "1.1.1.1",
+ "neighbors": {
+ "r3": {
+ "area": "1.1.1.1"
+ }
+ }
+ }
+ }
+ }
Returns
-------
@@ -115,17 +142,17 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True
logger.debug("Entering lib API: __create_ospf_global()")
try:
- ospf_data = input_dict[router]["ospf"]
+ ospf_data = input_dict[router][ospf]
del_ospf_action = ospf_data.setdefault("delete", False)
if del_ospf_action:
- config_data = ["no router ospf"]
+ config_data = ["no router {}".format(ospf)]
result = create_common_configuration(
- tgen, router, config_data, "ospf", build, load_config
+ tgen, router, config_data, ospf, build, load_config
)
return result
config_data = []
- cmd = "router ospf"
+ cmd = "router {}".format(ospf)
config_data.append(cmd)
@@ -133,9 +160,9 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True
router_id = ospf_data.setdefault("router_id", None)
del_router_id = ospf_data.setdefault("del_router_id", False)
if del_router_id:
- config_data.append("no ospf router-id")
+ config_data.append("no {} router-id".format(ospf))
if router_id:
- config_data.append("ospf router-id {}".format(router_id))
+ config_data.append("{} router-id {}".format(ospf, router_id))
# redistribute command
redistribute_data = ospf_data.setdefault("redistribute", {})
@@ -154,6 +181,7 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True
if del_action:
cmd = "no {}".format(cmd)
config_data.append(cmd)
+
# area information
area_data = ospf_data.setdefault("area", {})
if area_data:
@@ -173,6 +201,20 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True
cmd = "no {}".format(cmd)
config_data.append(cmd)
+ # area interface information for ospf6d only
+ if ospf == "ospf6":
+ area_iface = ospf_data.setdefault("neighbors", {})
+ if area_iface:
+ for neighbor in area_iface:
+ if "area" in area_iface[neighbor]:
+ iface = input_dict[router]["links"][neighbor]["interface"]
+ cmd = "interface {} area {}".format(
+ iface, area_iface[neighbor]["area"]
+ )
+ if area_iface[neighbor].setdefault("delete", False):
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
# summary information
summary_data = ospf_data.setdefault("summary-address", {})
if summary_data:
@@ -197,8 +239,9 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True
if del_action:
cmd = "no {}".format(cmd)
config_data.append(cmd)
+
result = create_common_configuration(
- tgen, router, config_data, "ospf", build, load_config
+ tgen, router, config_data, ospf, build, load_config
)
except InvalidCLIError:
@@ -235,7 +278,7 @@ def create_router_ospf6(tgen, topo, input_dict=None, build=False, load_config=Tr
-------
True or False
"""
- logger.debug("Entering lib API: create_router_ospf()")
+ logger.debug("Entering lib API: create_router_ospf6()")
result = False
if not input_dict:
@@ -244,67 +287,15 @@ def create_router_ospf6(tgen, topo, input_dict=None, build=False, load_config=Tr
topo = topo["routers"]
input_dict = deepcopy(input_dict)
for router in input_dict.keys():
- if "ospf" not in input_dict[router]:
- logger.debug("Router %s: 'ospf' not present in input_dict", router)
+ if "ospf6" not in input_dict[router]:
+ logger.debug("Router %s: 'ospf6' not present in input_dict", router)
continue
- result = __create_ospf_global(tgen, input_dict, router, build, load_config)
-
- logger.debug("Exiting lib API: create_router_ospf()")
- return result
-
-
-def __create_ospf6_global(tgen, input_dict, router, build=False, load_config=True):
- """
- Helper API to create ospf global configuration.
-
- Parameters
- ----------
- * `tgen` : Topogen object
- * `input_dict` : Input dict data, required when configuring from testcase
- * `router` : router id to be configured.
- * `build` : Only for initial setup phase this is set as True.
-
- Returns
- -------
- True or False
- """
-
- result = False
- logger.debug("Entering lib API: __create_ospf_global()")
- try:
-
- ospf_data = input_dict[router]["ospf6"]
- del_ospf_action = ospf_data.setdefault("delete", False)
- if del_ospf_action:
- config_data = ["no ipv6 router ospf"]
- result = create_common_configuration(
- tgen, router, config_data, "ospf", build, load_config
- )
- return result
-
- config_data = []
- cmd = "router ospf"
-
- config_data.append(cmd)
-
- router_id = ospf_data.setdefault("router_id", None)
- del_router_id = ospf_data.setdefault("del_router_id", False)
- if del_router_id:
- config_data.append("no ospf router-id")
- if router_id:
- config_data.append("ospf router-id {}".format(router_id))
-
- result = create_common_configuration(
- tgen, router, config_data, "ospf", build, load_config
+ result = __create_ospf_global(
+ tgen, input_dict, router, build, load_config, "ospf6"
)
- except InvalidCLIError:
- # Traceback
- errormsg = traceback.format_exc()
- logger.error(errormsg)
- return errormsg
- logger.debug("Exiting lib API: create_ospf_global()")
+ logger.debug("Exiting lib API: create_router_ospf6()")
return result
@@ -682,6 +673,70 @@ def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False):
return result
+################################
+# Verification procs
+################################
+@retry(attempts=40, wait=2, return_is_str=True)
+def verify_ospf6_neighbor(tgen, topo):
+ """
+ This API is to verify ospf6 neighborship by running
+ the "show ipv6 ospf6 neighbor" command,
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+
+ Usage
+ -----
+ Check FULL neighbors.
+ verify_ospf6_neighbor(tgen, topo)
+
+ result = verify_ospf6_neighbor(tgen, topo)
+
+ Returns
+ -------
+ True or False (Error Message)
+ """
+
+ logger.debug("Entering lib API: verify_ospf6_neighbor()")
+ result = False
+ for router, rnode in tgen.routers().items():
+ if "ospf6" not in topo["routers"][router]:
+ continue
+
+ logger.info("Verifying OSPF6 neighborship on router %s:", router)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ipv6 ospf6 neighbor json", isjson=True
+ )
+
+ if not show_ospf_json:
+ return "OSPF6 is not running"
+
+ ospf_nbr_list = topo["routers"][router]["ospf6"]["neighbors"]
+ no_of_peer = 0
+ for ospf_nbr in ospf_nbr_list:
+ ospf_nbr_rid = topo["routers"][ospf_nbr]["ospf6"]["router_id"]
+ for neighbor in show_ospf_json["neighbors"]:
+ if neighbor["neighborId"] == ospf_nbr_rid:
+ nh_state = neighbor["state"]
+ break
+ else:
+ return "[DUT: {}] OSPF6 peer {} missing".format(router, ospf_nbr_rid)
+
+ if nh_state == "Full":
+ no_of_peer += 1
+
+ if no_of_peer == len(ospf_nbr_list):
+ logger.info("[DUT: {}] OSPF6 is Converged".format(router))
+ result = True
+ else:
+ return "[DUT: {}] OSPF6 is not Converged".format(router)
+
+ logger.debug("Exiting API: verify_ospf6_neighbor()")
+ return result
+
+
@retry(attempts=21, wait=2, return_is_str=True)
def verify_ospf_rib(
tgen, dut, input_dict, next_hop=None, tag=None, metric=None, fib=None
diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py
index 6bb1326519..294f60bf68 100644
--- a/tests/topotests/lib/pim.py
+++ b/tests/topotests/lib/pim.py
@@ -539,7 +539,8 @@ def configure_pim_force_expire(tgen, topo, input_dict, build=False):
#############################################
# Verification APIs
#############################################
-def verify_pim_neighbors(tgen, topo, dut=None, iface=None):
+@retry(attempts=6, wait=2, return_is_str=True)
+def verify_pim_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None):
"""
Verify all PIM neighbors are up and running, config is verified
using "show ip pim neighbor" cli
@@ -550,10 +551,11 @@ def verify_pim_neighbors(tgen, topo, dut=None, iface=None):
* `topo` : json file data
* `dut` : dut info
* `iface` : link for which PIM nbr need to check
+ * `nbr_ip` : neighbor ip of interface
Usage
-----
- result = verify_pim_neighbors(tgen, topo, dut, link)
+ result = verify_pim_neighbors(tgen, topo, dut, iface=ens192, nbr_ip=20.1.1.2)
Returns
-------
@@ -1530,8 +1532,8 @@ def verify_pim_interface_traffic(tgen, input_dict):
return output_dict
-@retry(attempts=31, wait=2, return_is_str=True)
-def verify_pim_interface(tgen, topo, dut):
+@retry(attempts=21, wait=2, return_is_str=True)
+def verify_pim_interface(tgen, topo, dut, interface=None, interface_ip=None):
"""
Verify all PIM interface are up and running, config is verified
using "show ip pim interface" cli
@@ -1541,10 +1543,12 @@ def verify_pim_interface(tgen, topo, dut):
* `tgen`: topogen object
* `topo` : json file data
* `dut` : device under test
+ * `interface` : interface name
+ * `interface_ip` : interface ip address
Usage
-----
- result = verify_pim_interfacetgen, topo, dut)
+ result = verify_pim_interface(tgen, topo, dut, interface=ens192, interface_ip=20.1.1.1)
Returns
-------
@@ -1560,56 +1564,55 @@ def verify_pim_interface(tgen, topo, dut):
logger.info("[DUT: %s]: Verifying PIM interface status:", dut)
rnode = tgen.routers()[dut]
- show_ip_pim_interface_json = run_frr_cmd(
- rnode, "show ip pim interface json", isjson=True
- )
-
- for destLink, data in topo["routers"][dut]["links"].items():
- if "type" in data and data["type"] == "loopback":
- continue
+ show_ip_pim_interface_json = rnode.\
+ vtysh_cmd("show ip pim interface json", isjson=True)
+
+ logger.info("show_ip_pim_interface_json: \n %s",
+ show_ip_pim_interface_json)
+
+ if interface_ip:
+ if interface in show_ip_pim_interface_json:
+ pim_intf_json = show_ip_pim_interface_json[interface]
+ if pim_intf_json["address"] != interface_ip:
+ errormsg = ("[DUT %s]: PIM interface "
+ "ip is not correct "
+ "[FAILED]!! Expected : %s, Found : %s"
+ %(dut, pim_intf_json["address"],interface_ip))
+ return errormsg
+ else:
+ logger.info("[DUT %s]: PIM interface "
+ "ip is correct "
+ "[Passed]!! Expected : %s, Found : %s"
+ %(dut, pim_intf_json["address"],interface_ip))
+ return True
+ else:
+ for destLink, data in topo["routers"][dut]["links"].items():
+ if "type" in data and data["type"] == "loopback":
+ continue
- if "pim" in data and data["pim"] == "enable":
- pim_interface = data["interface"]
- pim_intf_ip = data["ipv4"].split("/")[0]
+ if "pim" in data and data["pim"] == "enable":
+ pim_interface = data["interface"]
+ pim_intf_ip = data["ipv4"].split("/")[0]
- if pim_interface in show_ip_pim_interface_json:
- pim_intf_json = show_ip_pim_interface_json[pim_interface]
+ if pim_interface in show_ip_pim_interface_json:
+ pim_intf_json = show_ip_pim_interface_json\
+ [pim_interface]
# Verifying PIM interface
- if (
- pim_intf_json["address"] != pim_intf_ip
- and pim_intf_json["state"] != "up"
- ):
- errormsg = (
- "[DUT %s]: PIM interface: %s "
- "PIM interface ip: %s, status check "
- "[FAILED]!! Expected : %s, Found : %s"
- % (
- dut,
- pim_interface,
- pim_intf_ip,
- pim_interface,
- pim_intf_json["state"],
- )
- )
+ if pim_intf_json["address"] != pim_intf_ip and \
+ pim_intf_json["state"] != "up":
+ errormsg = ("[DUT %s]: PIM interface: %s "
+ "PIM interface ip: %s, status check "
+ "[FAILED]!! Expected : %s, Found : %s"
+ %(dut, pim_interface, pim_intf_ip,
+ pim_interface, pim_intf_json["state"]))
return errormsg
- logger.info(
- "[DUT %s]: PIM interface: %s, "
- "interface ip: %s, status: %s"
- " [PASSED]!!",
- dut,
- pim_interface,
- pim_intf_ip,
- pim_intf_json["state"],
- )
- else:
- errormsg = (
- "[DUT %s]: PIM interface: %s "
- "PIM interface ip: %s, is not present "
- % (dut, pim_interface, pim_intf_ip,)
- )
- return errormsg
+ logger.info("[DUT %s]: PIM interface: %s, "
+ "interface ip: %s, status: %s"
+ " [PASSED]!!",
+ dut, pim_interface, pim_intf_ip,
+ pim_intf_json["state"])
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@@ -1846,7 +1849,6 @@ def clear_ip_mroute_verify(tgen, dut):
rnode = tgen.routers()[dut]
- # sleep(60)
logger.info("[DUT: %s]: IP mroutes uptime before clear", dut)
mroute_json_1 = run_frr_cmd(rnode, "show ip mroute json", isjson=True)
@@ -3387,3 +3389,62 @@ def verify_multicast_flag_state(tgen, dut, src_address, group_addresses, flag):
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
+
+
@retry(attempts=21, wait=2, return_is_str=True)
def verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip):
    """
    Verify all IGMP interface are up and running, config is verified
    using "show ip igmp interface" cli

    Parameters
    ----------
    * `tgen`: topogen object
    * `topo` : json file data (unused here; kept for API symmetry with
      the other verify_* helpers)
    * `dut` : device under test
    * `igmp_iface` : interface name
    * `interface_ip` : expected interface ip address

    Usage
    -----
    result = verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip)

    Returns
    -------
    errormsg(str) or True
    """

    logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))

    for router in tgen.routers():
        # Only verify the requested DUT; skip every other router.
        if router != dut:
            continue

        # Bug fix: the original message said "PIM interface" in this
        # IGMP verification helper.
        logger.info("[DUT: %s]: Verifying IGMP interface status:", dut)

        rnode = tgen.routers()[dut]
        show_ip_igmp_interface_json = run_frr_cmd(
            rnode, "show ip igmp interface json", isjson=True
        )

        if igmp_iface in show_ip_igmp_interface_json:
            igmp_intf_json = show_ip_igmp_interface_json[igmp_iface]
            # Verifying IGMP interface address matches the expected one
            if igmp_intf_json["address"] != interface_ip:
                # Bug fix: "Expected" and "Found" arguments were swapped
                # in the original message (interface_ip is the expected
                # value; the json field is what the router reports).
                errormsg = (
                    "[DUT %s]: igmp interface ip is not correct "
                    "[FAILED]!! Expected : %s, Found : %s"
                    % (dut, interface_ip, igmp_intf_json["address"])
                )
                return errormsg

            logger.info(
                "[DUT %s]: igmp interface: %s, "
                "interface ip: %s"
                " [PASSED]!!",
                dut,
                igmp_iface,
                interface_ip,
            )
        else:
            errormsg = (
                "[DUT %s]: igmp interface: %s "
                "igmp interface ip: %s, is not present "
                % (dut, igmp_iface, interface_ip)
            )
            return errormsg

    logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
    return True
diff --git a/tests/topotests/lib/send_bsr_packet.py b/tests/topotests/lib/send_bsr_packet.py
new file mode 100755
index 0000000000..c226899324
--- /dev/null
+++ b/tests/topotests/lib/send_bsr_packet.py
@@ -0,0 +1,58 @@
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+import sys
+import argparse
+from scapy.all import Raw, sendp
+import binascii
+
+
def send_packet(packet, iface, interval, count):
    """
    Read a BSR packet given as a hex string and send it out of the
    specified interface using scapy.

    Parameter:
    ---------
    * `packet` : BSR packet as a hex string (no separators)
    * `iface` : Interface from which the packet is sent
      (documented as `interface` in the original; the actual
      parameter name is `iface`)
    * `interval` : Interval in seconds between successive packets
    * `count` : Number of packets to be sent
    """

    # Decode the hex string into raw bytes and wrap it in a scapy Raw
    # payload so it is transmitted exactly as given.
    data = binascii.a2b_hex(packet)
    p = Raw(load=data)
    # Dump the packet to stdout for debugging/visibility.
    p.show()
    # interval/count may arrive as strings (e.g. from argparse), so
    # coerce them to int before handing off to scapy.
    sendp(p, inter=int(interval), iface=iface, count=int(count))
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Send BSR Raw packet")
+ parser.add_argument("packet", help="Packet in raw format")
+ parser.add_argument("iface", help="Packet send to this ineterface")
+ parser.add_argument("--interval", help="Interval between packets", default=0)
+ parser.add_argument(
+ "--count", help="Number of times packet is sent repetitively", default=0
+ )
+ args = parser.parse_args()
+
+ if not args.packet or not args.iface:
+ sys.exit(1)
+
+ send_packet(args.packet, args.iface, args.interval, args.count)
diff --git a/tests/topotests/lib/snmptest.py b/tests/topotests/lib/snmptest.py
new file mode 100644
index 0000000000..5112500e0b
--- /dev/null
+++ b/tests/topotests/lib/snmptest.py
@@ -0,0 +1,152 @@
+#
+# topogen.py
+# Library of helper functions for NetDEF Topology Tests
+#
+# Copyright (c) 2020 by Volta Networks
+#
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+SNMP library to test snmp walks and gets
+
+Basic usage instructions:
+
+* define an SnmpTester class giving a router, address, community and version
+* use test_oid or test_walk to check values in MIBS
+* see tests/topotest/simple-snmp-test/test_simple_snmp.py for example
+"""
+
+from topolog import logger
+
+
class SnmpTester(object):
    """Helper for running snmpget/snmpgetnext/snmpwalk against a router
    and parsing the textual output of the net-snmp command-line tools."""

    def __init__(self, router, iface, community, version):
        # router: topotest router object providing .cmd()
        # iface: address/interface passed to the snmp* commands
        self.community = community
        self.version = version
        self.router = router
        self.iface = iface
        logger.info(
            "created SNMP tester: SNMPv{0} community:{1}".format(
                self.version, self.community
            )
        )

    def _snmp_config(self):
        """
        Helper function to build a string with SNMP
        configuration for commands.
        """
        return "-v {0} -c {1} {2}".format(self.version, self.community, self.iface)

    @staticmethod
    def _get_snmp_value(snmp_output):
        """Extract the value field from one line of net-snmp output.

        net-snmp prints lines of the form "<OID> = <TYPE>: <value...>";
        the value may span several whitespace-separated tokens.
        """
        tokens = snmp_output.strip().split()

        num_value_tokens = len(tokens) - 3

        # this copes with the empty string return
        if num_value_tokens == 0:
            return tokens[2]

        if num_value_tokens > 1:
            # Re-join a multi-token value with single spaces.
            output = ""
            index = 3
            while index < len(tokens) - 1:
                output += "{} ".format(tokens[index])
                index += 1
            output += "{}".format(tokens[index])
            return output
        # fourth token (index 3) is the value of the object
        return tokens[3]

    @staticmethod
    def _get_snmp_oid(snmp_output):
        """Extract the OID index — everything after the first '.' in the
        leading token of one line of net-snmp output.

        Bug fix: the original file defined this method twice; the second
        definition (containing commented-out code) silently shadowed the
        first.  Both variants were behaviorally identical, so a single
        clean copy is kept.
        """
        tokens = snmp_output.strip().split()
        return tokens[0].split(".", 1)[1]

    def _parse_multiline(self, snmp_output):
        """Parse multi-line snmpwalk output into ({oid: value}, [values])."""
        results = snmp_output.strip().split("\r\n")

        out_dict = {}
        out_list = []
        for response in results:
            out_dict[self._get_snmp_oid(response)] = self._get_snmp_value(response)
            out_list.append(self._get_snmp_value(response))

        return out_dict, out_list

    def get(self, oid):
        """snmpget one OID; return its value, or None when not found."""
        cmd = "snmpget {0} {1}".format(self._snmp_config(), oid)

        result = self.router.cmd(cmd)
        if "not found" in result:
            return None
        return self._get_snmp_value(result)

    def get_next(self, oid):
        """snmpgetnext from an OID; return the next object's value or None."""
        cmd = "snmpgetnext {0} {1}".format(self._snmp_config(), oid)

        result = self.router.cmd(cmd)
        print("get_next: {}".format(result))
        if "not found" in result:
            return None
        return self._get_snmp_value(result)

    def walk(self, oid):
        """snmpwalk a subtree; return ({oid: value}, [values])."""
        cmd = "snmpwalk {0} {1}".format(self._snmp_config(), oid)

        result = self.router.cmd(cmd)
        return self._parse_multiline(result)

    def test_oid(self, oid, value):
        """Return True when snmpgetnext(oid) yields the expected value."""
        print("oid: {}".format(self.get_next(oid)))
        return self.get_next(oid) == value

    def test_oid_walk(self, oid, values, oids=None):
        """Walk `oid` and verify the results.

        When `oids` is given, each listed oid must map to the matching
        entry of `values`; otherwise `values` must be a prefix of the
        walk's value list.
        """
        results_dict, results_list = self.walk(oid)
        print("test_oid_walk: {} {}".format(oid, results_dict))
        if oids is not None:
            index = 0
            for oid in oids:
                # avoid key error for missing keys
                if oid not in results_dict.keys():
                    print("FAIL: missing oid key {}".format(oid))
                    return False
                if results_dict[oid] != values[index]:
                    print(
                        "FAIL{} {} |{}| == |{}|".format(
                            oid, index, results_dict[oid], values[index]
                        )
                    )
                    return False
                index += 1
            return True

        # Return true if 'values' is a subset of 'results_list'
        print("test {} == {}".format(results_list[: len(values)], values))
        return results_list[: len(values)] == values
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
index 7c52e824c1..f958cc11d3 100644
--- a/tests/topotests/lib/topogen.py
+++ b/tests/topotests/lib/topogen.py
@@ -555,6 +555,7 @@ class TopoRouter(TopoGear):
RD_BABEL = 15
RD_PBRD = 16
RD_PATH = 17
+ RD_SNMP = 18
RD = {
RD_ZEBRA: "zebra",
RD_RIP: "ripd",
@@ -572,7 +573,8 @@ class TopoRouter(TopoGear):
RD_SHARP: "sharpd",
RD_BABEL: "babeld",
RD_PBRD: "pbrd",
- RD_PATH: 'pathd',
+ RD_PATH: "pathd",
+ RD_SNMP: "snmpd",
}
def __init__(self, tgen, cls, name, **params):
@@ -657,7 +659,7 @@ class TopoRouter(TopoGear):
Possible daemon values are: TopoRouter.RD_ZEBRA, TopoRouter.RD_RIP,
TopoRouter.RD_RIPNG, TopoRouter.RD_OSPF, TopoRouter.RD_OSPF6,
TopoRouter.RD_ISIS, TopoRouter.RD_BGP, TopoRouter.RD_LDP,
- TopoRouter.RD_PIM, TopoRouter.RD_PBR.
+ TopoRouter.RD_PIM, TopoRouter.RD_PBR, TopoRouter.RD_SNMP.
"""
daemonstr = self.RD.get(daemon)
self.logger.info('loading "{}" configuration: {}'.format(daemonstr, source))
diff --git a/tests/topotests/lib/topojson.py b/tests/topotests/lib/topojson.py
index 88e6f78b92..fcc6c19868 100644
--- a/tests/topotests/lib/topojson.py
+++ b/tests/topotests/lib/topojson.py
@@ -45,7 +45,7 @@ from lib.common_config import (
from lib.pim import create_pim_config, create_igmp_config
from lib.bgp import create_router_bgp
-from lib.ospf import create_router_ospf
+from lib.ospf import create_router_ospf, create_router_ospf6
ROUTER_LIST = []
@@ -314,6 +314,7 @@ def build_config_from_json(tgen, topo, save_bkup=True):
("igmp", create_igmp_config),
("bgp", create_router_bgp),
("ospf", create_router_ospf),
+ ("ospf6", create_router_ospf6),
]
)
diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py
index ef0ac27118..1e6ef1b2b3 100644
--- a/tests/topotests/lib/topotest.py
+++ b/tests/topotests/lib/topotest.py
@@ -1105,7 +1105,8 @@ class Router(Node):
"sharpd": 0,
"babeld": 0,
"pbrd": 0,
- 'pathd': 0
+ "pathd": 0,
+ "snmpd": 0,
}
self.daemons_options = {"zebra": ""}
self.reportCores = True
@@ -1289,6 +1290,8 @@ class Router(Node):
% (self.routertype, self.routertype, self.routertype, daemon)
)
self.waitOutput()
+ if (daemon == "snmpd") and (self.routertype == "frr"):
+ self.cmd('echo "agentXSocket /etc/frr/agentx" > /etc/snmp/frr.conf')
if (daemon == "zebra") and (self.daemons["staticd"] == 0):
# Add staticd with zebra - if it exists
staticd_path = os.path.join(self.daemondir, "staticd")
@@ -1445,6 +1448,20 @@ class Router(Node):
while "staticd" in daemons_list:
daemons_list.remove("staticd")
+ if "snmpd" in daemons_list:
+ snmpd_path = "/usr/sbin/snmpd"
+ snmpd_option = self.daemons_options["snmpd"]
+ self.cmd(
+ "{0} {1} -C -c /etc/frr/snmpd.conf -p /var/run/{2}/snmpd.pid -x /etc/frr/agentx > snmpd.out 2> snmpd.err".format(
+ snmpd_path, snmpd_option, self.routertype
+ )
+ )
+ logger.info("{}: {} snmpd started".format(self, self.routertype))
+
+ # Remove `snmpd` so we don't attempt to start it again.
+ while "snmpd" in daemons_list:
+ daemons_list.remove("snmpd")
+
# Fix Link-Local Addresses
# Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
self.cmd(
@@ -1608,6 +1625,8 @@ class Router(Node):
return "%s: vtysh killed by AddressSanitizer" % (self.name)
for daemon in self.daemons:
+ if daemon == "snmpd":
+ continue
if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
if daemon == "staticd":
diff --git a/tests/topotests/multicast-pim-bsm-topo1/mcast_pim_bsmp_01.json b/tests/topotests/multicast-pim-bsm-topo1/mcast_pim_bsmp_01.json
new file mode 100644
index 0000000000..14cb0bee1d
--- /dev/null
+++ b/tests/topotests/multicast-pim-bsm-topo1/mcast_pim_bsmp_01.json
@@ -0,0 +1,238 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32},
+ "routers": {
+ "b1": {
+ "links": {
+ "f1": {"ipv4": "auto", "pim": "enable"},
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}
+ },
+ "bsm": {
+ "bsr_packets": {
+ "packet1" : {
+ "data": "01005e00000d005056961165080045c000aa5af500000167372a46000001e000000d2400f5ce165b000001004600000101000018e1010100080800000100090a090a0096650001000909090a0096660001000708090a00966700010007070907009668000100070702070096690001000705020700966a0001000702020700966b0001000202020200966c0001000020e1010101010100000100050606050096000001000020e20101010101000001000909090900960000",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "70.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32",
+ "9.9.9.10/32", "7.7.9.7/32",
+ "7.7.2.7/32", "7.5.2.7/32",
+ "7.2.2.7/32", "2.2.2.2/32"],
+ "225.1.1.1/32": ["5.6.6.5/32"],
+ "225.200.100.100/32": ["210.210.210.210/32"],
+ "226.1.1.1/32": ["9.9.9.9/32"]
+
+ },
+ "Desc" : "Packet with 3 group range - rp prio different"
+ },
+ "packet2" : {
+ "data": "01005e00000d005056961165080045c0009420f400000167714146000001e000000d24000b3b164a000001004600000101000018e1010100080800000100020202020096640001000909090a0096640001000707020700966400010007020207009664000100070709070096640001000708090a00966400010007050207009664000100090a090a0096640001000020e20101010101000001000909090900000000",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "70.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32",
+ "9.9.9.10/32", "7.7.9.7/32",
+ "7.7.2.7/32", "7.5.2.7/32",
+ "7.2.2.7/32", "2.2.2.2/32"]
+ },
+ "Desc" : "Packet 1 with hold time 0 for 226.1.1.1/32"
+ },
+ "packet3" : {
+ "data": "01005e00000d005056961165080045c000944d0000000167453546000001e000000d2400e52b17c3000001004600000101000018e1010100080800000100090a090a0096650001000909090a0096660001000708090a00966700010007070907009668000100070702070096690001000705020700966a0001000702020700966b0001000202020200966c0001000020e20101010101000001000909090900960000",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "70.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32",
+ "9.9.9.10/32", "7.7.9.7/32",
+ "7.7.2.7/32", "7.5.2.7/32",
+ "7.2.2.7/32", "2.2.2.2/32"],
+ "226.1.1.1/32": ["9.9.9.9/32"]
+ },
+ "Desc" : "BSR Prio - TC 4"
+ },
+ "packet4" : {
+ "data": "01005e00000d005056961165080045c000aa3d1c00000167550346000001e000000d24000d671c52000001004600000101000018e1010100080800000100020202020096640001000909090a0096640001000707020700966400010007020207009664000100070709070096640001000708090a00966400010007050207009664000100090a090a0096640001000020e1010101010100000100090909090000000001000020e20101010101000001000909090900960000",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "70.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32",
+ "9.9.9.10/32", "7.7.9.7/32",
+ "7.7.2.7/32", "7.5.2.7/32",
+ "7.2.2.7/32", "2.2.2.2/32"],
+ "225.1.1.1/32": ["9.9.9.9/32"],
+ "226.1.1.1/32": ["9.9.9.9/32"]
+ },
+ "Desc" : "TC - 5"
+ },
+ "packet5" : {
+ "data": "01005e00000d005056961165080045c000aa3d1c00000167550346000001e000000d24000d671c52000001004600000101000018e1010100080800000100020202020096640001000909090a0096640001000707020700966400010007020207009664000100070709070096640001000708090a00966400010007050207009664000100090a090a0096640001000020e1010101010100000100090909090000000001000020e20101010101000001000909090900960000",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "70.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32",
+ "9.9.9.10/32", "7.7.9.7/32",
+ "7.7.2.7/32", "7.5.2.7/32",
+ "7.2.2.7/32", "2.2.2.2/32"],
+ "226.1.1.1/32": ["9.9.9.9/32"]
+ },
+ "Desc" : "TC - 5, 225.1.1.1 with hold time 0"
+ },
+ "packet6" : {
+ "data": "01005e00000d005056961165080045c0008a795e0000016718e146000001e000000d24006cc509d5000001004600000101000018e10101000707000001000909090a0096660001000708090a00966700010007070907009668000100070702070096690001000705020700966a0001000702020700966b0001000202020200966c0001000020e20101010101000001000909090900960000",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "70.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["7.8.9.10/32",
+ "9.9.9.10/32", "7.7.9.7/32",
+ "7.7.2.7/32", "7.5.2.7/32",
+ "7.2.2.7/32", "2.2.2.2/32"],
+ "226.1.1.1/32": ["9.9.9.9/32"]
+ },
+ "Desc" : "TC - 6,High prio rp removed on 225.1.1.0/24"
+ },
+ "packet7" : {
+ "data": "01005e00000d005056961165080045c0007e6ebb00000167239046000001e000000d2400090810b3000001004600000101000018e1010100080800000100020202020096640001000909090a0096640001000707020700966400010007020207009664000100070709070096640001000708090a00966400010007050207009664000100090a090a00966400",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "70.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32",
+ "9.9.9.10/32", "7.7.9.7/32",
+ "7.7.2.7/32", "7.5.2.7/32",
+ "7.2.2.7/32", "2.2.2.2/32"]
+ },
+ "Desc" : "TC - 8, rps with same priority"
+ },
+
+ "packet8" : {
+ "data": "01005e00000d005056b76687080045c000383cdf0000016755b246000001e000000d24008ad51a9f000001004600000101000020e1c86464010100000100d2d2d2d200960000",
+ "group": "225.200.100.100/32",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "70.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.200.100.100/32": ["210.210.210.210/32"]
+ },
+ "Desc" : "TC - 30, grp add with all octet"
+ },
+
+ "packet9" : {
+ "data": "01005e00000d005056b76687080045c000387b8600000167170b46000001e000000d2400c6282245000001000101020701000020e1c86464010100000100d2d2d2d200960000",
+ "group": "225.200.100.100/32",
+ "candidate_rp": "210.210.210.210/32",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "1.1.2.7/32",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.200.100.100/32": ["210.210.210.210/32"]
+ },
+ "Desc" : "TC -29, BSM with preferred ip"
+ }
+
+ }
+ }
+ },
+
+ "b2": {
+ "links": {
+ "f1": {"ipv4": "auto", "pim": "enable"},
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}
+ },
+ "bsm": {
+ "bsr_packets": {
+ "packet1" : {
+ "data": "01005e00000d005056b70489080045c0003865db0000016731b641000001e000000d2400659c0c6f000001004100000101000018e10101000101000001002121212100960000",
+ "src_ip": "65.0.0.1/24",
+ "dest_ip": "65.0.0.2/24",
+ "bsr": "65.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["33.33.33.33/32"],
+ "225.200.100.100/32": ["210.210.210.210/32"]
+ }
+ },
+ "packet2" : {
+ "data": "01005e00000d005056b70489080045c00038663000000167316141000001e000000d24006dce0433000a01004100000101000018e10101000101000001002121212100960000",
+ "src_ip": "65.0.0.1/24",
+ "dest_ip": "65.0.0.2/24",
+ "bsr": "65.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["33.33.33.33/32"]
+ }
+ },
+
+ "packet3" : {
+ "data": "01005e00000d005056b76687080045c00038f5c800000167a1c841000001e000000d2400c6621a10000001000a02010101000020e1c86464010100000100d2d2d2d200960000",
+ "src_ip": "65.0.0.1/24",
+ "dest_ip": "65.0.0.2/24",
+ "bsr": "10.2.1.1/32",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.200.100.100/32": ["210.210.210.210/32"]
+ }
+ }
+
+ }
+ }
+ },
+
+ "f1": {
+ "links": {
+ "b1": {"ipv4": "auto", "pim": "enable"},
+ "b2": {"ipv4": "auto", "pim": "enable"},
+ "i1": {"ipv4": "auto", "pim": "enable"},
+ "s1": {"ipv4": "auto", "pim": "enable"}
+ }
+ },
+ "i1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "f1": {"ipv4": "auto", "pim": "enable"},
+ "l1": {"ipv4": "auto", "pim": "enable"}
+ }
+ },
+ "l1": {
+ "links": {
+ "i1": {"ipv4": "auto", "pim": "enable"},
+ "r1": {"ipv4": "auto", "pim": "enable"}
+ },
+ "igmp": {
+ "interfaces": {
+ "l1-r1-eth1" :{
+ "igmp":{
+ "version": "2"
+ }
+ }
+ }
+ }
+ },
+ "s1": {
+ "links": {
+ "f1": {"ipv4": "auto", "pim": "enable"}
+ }
+ },
+ "r1": {
+ "links": {
+ "l1": {"ipv4": "auto", "pim": "disable"}
+ }
+ }
+ }
+}
diff --git a/tests/topotests/multicast-pim-bsm-topo1/test_mcast_pim_bsmp_01.py b/tests/topotests/multicast-pim-bsm-topo1/test_mcast_pim_bsmp_01.py
new file mode 100644
index 0000000000..c670c82d21
--- /dev/null
+++ b/tests/topotests/multicast-pim-bsm-topo1/test_mcast_pim_bsmp_01.py
@@ -0,0 +1,1653 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2020 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test PIM BSM processing basic functionality:
+
+Test steps
+- Create topology (setup module)
+- Bring up topology
+
+Tests covered in this suite
+1. Verify FRR router select higher IP BSR , when 2 BSR present in the network
+2. Verify BSR and RP updated correctly after configuring as black hole address
+3.1 Verify when new router added to the topology, FRR node will send
+ unicast BSM to new router
+3.2 Verify if no forwarding bit is set , FRR is not forwarding the
+ BSM to other PIM nbrs
+3.3 Verify multicast BSM is sent to new router when unicast BSM is disabled
+4.1 Verify BSM arrived on non bsm capable interface is dropped and
+ not processed
+4.2 Verify group to RP info updated correctly in FRR node, after shut and
+ no-shut of BSM enable interfaces
+5. Verify static RP is preferred over BSR
+6.1 Verify adding/deleting the group to rp mapping and RP priority
+ multiple times
+6.2 Verify RP and (*,G) detail after PIM process restart on FRR node
+7.1 Verify BSM timeout on FRR1
+7.2 Verify RP state in FRR1 after Bootstrap timer expiry
+8.1 Verify upstream interfaces(IIF) and join state are updated properly
+ after BSM received for FRR
+8.2 Verify IIF and OIL in "show ip pim state" updated properly after
+ BSM received
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ step,
+ addKernelRoute,
+ create_static_routes,
+ iperfSendIGMPJoin,
+ stop_router,
+ start_router,
+ shutdown_bringup_interface,
+ kill_router_daemons,
+ start_router_daemons,
+ reset_config_on_routers,
+ do_countdown,
+ apply_raw_config,
+ kill_iperf,
+ run_frr_cmd,
+ required_linux_kernel_version,
+ topo_daemons,
+)
+
+from lib.pim import (
+ create_pim_config,
+ add_rp_interfaces_and_pim_config,
+ reconfig_interfaces,
+ scapy_send_bsr_raw_packet,
+ find_rp_from_bsrp_info,
+ verify_pim_grp_rp_source,
+ verify_pim_bsr,
+ verify_ip_mroutes,
+ verify_join_state_and_timer,
+ verify_pim_state,
+ verify_upstream_iif,
+ verify_igmp_groups,
+ verify_ip_pim_upstream_rpf,
+ enable_disable_pim_unicast_bsm,
+ enable_disable_pim_bsm,
+ clear_ip_mroute,
+ clear_ip_pim_interface_traffic,
+ verify_pim_interface_traffic,
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/mcast_pim_bsmp_01.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+TOPOLOGY = """
+
+ b1_____
+ |
+ |
+ s1-----f1-----i1-----l1----r1
+ |
+ ______|
+ b2
+
+ b1 - BSR 1
+ b2 - BSR 2
+ s1 - Source
+ f1 - FHR
+ i1 - Intermediate Router (also RP)
+ r1 - Receiver
+
+"""
+# Global variables
+NEXT_HOP1 = "70.0.0.1"
+NEXT_HOP2 = "65.0.0.1"
+BSR_IP_1 = "1.1.2.7"
+BSR_IP_2 = "10.2.1.1"
+BSR1_ADDR = "1.1.2.7/32"
+BSR2_ADDR = "10.2.1.1/32"
+
+
class CreateTopo(Topo):
    """
    Mininet topology class for this suite (topology 1).

    * `Topo`: Topology object
    """

    def build(self, *_args, **_opts):
        """Construct the topology described by the suite's JSON file."""
        # The generator is looked up and handed straight to the
        # JSON-driven topology builder.
        build_topo_from_json(get_topogen(self), topo)
+
+
def setup_module(mod):
    """
    Sets up the pytest environment: checks kernel prerequisites, builds
    the topology from JSON, starts the routers and applies their
    configuration.

    * `mod`: module name
    """

    # Required linux kernel version for this suite to run.
    result = required_linux_kernel_version("4.15")
    if result is not True:
        pytest.skip("Kernel requirements are not met")

    testsuite_run_time = time.asctime(time.localtime(time.time()))
    logger.info("Testsuite start time: {}".format(testsuite_run_time))
    logger.info("=" * 40)
    logger.info("Master Topology: \n {}".format(TOPOLOGY))

    logger.info("Running setup_module to create topology")

    # This function initiates the topology build with Topogen...
    tgen = Topogen(CreateTopo, mod.__name__)
    # ... and here it calls Mininet initialization functions.

    # Get the list of daemons that need to be started for this suite.
    daemons = topo_daemons(tgen, topo)

    # Starting topology: create tmp files which are loaded to routers
    # to start daemons, and then start the routers.
    start_topology(tgen, daemons)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # Creating configuration from JSON
    build_config_from_json(tgen, topo)

    logger.info("Running setup_module() done")
+
+
def teardown_module():
    """Clean up after the suite: stop the topology and log the end time."""

    logger.info("Running teardown_module to delete topology")

    topo_gen = get_topogen()

    # Stopping the topology also removes the temporary config files.
    topo_gen.stop_topology()

    end_time = time.asctime(time.localtime(time.time()))
    logger.info("Testsuite end time: {}".format(end_time))
    logger.info("=" * 40)
+
+
+#####################################################
+#
+# Local APIs
+#
+#####################################################
+
+
def clear_bsrp_data(tgen, topo):

    """
    Clear the BSM database on every router after a test.

    Parameters
    ----------
    * `tgen`: topogen object
    * `topo`: json file data (unused here; kept for API compatibility)

    Usage
    -----
    result = clear_bsrp_data(tgen, topo)
    Returns
    -------
    True
    """

    for dut in tgen.routers():

        rnode = tgen.routers()[dut]

        # Bug fix: the original call passed no argument for the %s
        # placeholder, so the literal "%s" was logged.
        logger.info("[DUT: %s]: clear_bsrp_data", dut)

        run_frr_cmd(rnode, "clear ip pim bsr-data")

    return True
+
+
def verify_state_incremented(state_before, state_after):
    """
    Compare two snapshots of interface-traffic counters and verify that
    every counter strictly increased.

    Parameters
    ----------
    * `state_before` : State dictionary for any particular instance
    * `state_after` : State dictionary for any particular instance

    Returns an error message string on the first non-incremented counter,
    True when all counters incremented.
    """

    for rtr, counters in state_before.items():
        for counter in counters:
            old_value = state_before[rtr][counter]
            new_value = state_after[rtr][counter]

            # A counter that stayed equal or went down is a failure.
            if old_value >= new_value:
                errormsg = (
                    "[DUT: %s]: state %s value has not"
                    " incremented, Initial value: %s, "
                    "Current value: %s [FAILED!!]"
                    % (rtr, counter, old_value, new_value)
                )
                return errormsg

            logger.info(
                "[DUT: %s]: State %s value is "
                "incremented, Initial value: %s, Current value: %s"
                " [PASSED!!]",
                rtr,
                counter,
                old_value,
                new_value,
            )

    return True
+
+
def pre_config_to_bsm(tgen, topo, tc_name, bsr, sender, receiver, fhr, rp, lhr, packet):
    """
    API to do required configuration to send and receive BSR packet.

    Sets up interfaces, static routes, kernel routes and RP interfaces so
    that the raw BSM packet named by `packet` can be injected from `bsr`
    and propagate to the receiver.

    * `tgen` : topogen object
    * `topo` : json topology data
    * `tc_name` : test case name, used in assert messages
    * `bsr` : router that sources the raw BSM packet
    * `sender` : multicast traffic source router
    * `receiver` : multicast receiver router
    * `fhr` : first-hop router
    * `rp` : rendezvous point router
    * `lhr` : last-hop router
    * `packet` : key under topo[routers][bsr]["bsm"]["bsr_packets"]
    """

    # Re-configure interfaces as per BSR packet
    result = reconfig_interfaces(tgen, topo, bsr, fhr, packet)
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    # Create static routes towards the BSR address on fhr/rp/lhr so the
    # unicast RPF checks for the BSM source succeed.
    if "bsr" in topo["routers"][bsr]["bsm"]["bsr_packets"][packet]:
        bsr_route = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["bsr"]
        next_hop = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["src_ip"].split(
            "/"
        )[0]
        next_hop_rp = topo["routers"][fhr]["links"][rp]["ipv4"].split("/")[0]
        next_hop_lhr = topo["routers"][rp]["links"][lhr]["ipv4"].split("/")[0]

        # Add static routes
        input_dict = {
            fhr: {"static_routes": [{"network": bsr_route, "next_hop": next_hop}]},
            rp: {"static_routes": [{"network": bsr_route, "next_hop": next_hop_rp}]},
            lhr: {"static_routes": [{"network": bsr_route, "next_hop": next_hop_lhr}]},
        }

        result = create_static_routes(tgen, input_dict)
        assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    # Add kernel route on the BSR for the packet's destination group
    group = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["pkt_dst"]
    bsr_interface = topo["routers"][bsr]["links"][fhr]["interface"]
    result = addKernelRoute(tgen, bsr, bsr_interface, group)
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    # RP Mapping advertised by this packet: {group-range: [rp, ...]}
    rp_mapping = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["rp_mapping"]

    # Add interfaces in RP for all the RPs
    result = add_rp_interfaces_and_pim_config(tgen, topo, "lo", rp, rp_mapping)
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    # Add kernel routes to sender and receiver for every group range
    for group, rp_list in rp_mapping.items():
        mask = group.split("/")[1]
        # Kernel routes take a bare host address for /32 entries.
        if int(mask) == 32:
            group = group.split("/")[0]

        # Add kernel routes for sender
        s_interface = topo["routers"][sender]["links"][fhr]["interface"]
        result = addKernelRoute(tgen, sender, s_interface, group)
        assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

        # Add kernel routes for receiver
        r_interface = topo["routers"][receiver]["links"][lhr]["interface"]
        result = addKernelRoute(tgen, receiver, r_interface, group)
        assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

        # Add static routes for RPs in FHR and LHR
        next_hop_fhr = topo["routers"][rp]["links"][fhr]["ipv4"].split("/")[0]
        next_hop_lhr = topo["routers"][rp]["links"][lhr]["ipv4"].split("/")[0]
        input_dict = {
            fhr: {"static_routes": [{"network": rp_list, "next_hop": next_hop_fhr}]},
        }
        result = create_static_routes(tgen, input_dict)
        assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

        input_dict = {
            lhr: {"static_routes": [{"network": rp_list, "next_hop": next_hop_lhr}]},
        }
        result = create_static_routes(tgen, input_dict)
        assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
    return True
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
def test_BSR_higher_prefer_ip_p0(request):
    """
    Verify FRR router select higher IP BSR , when 2 BSR present in the network

    Topology used:
      b1_____
             |
             |
      s1-----f1-----i1-----l1----r1
             |
       ______|
      b2

      b1 - BSR 1
      b2 - BSR 2
      s1 - Source
      f1 - FHR
      i1 - Intermediate Router (also RP)
      r1 - Receiver
    """

    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)

    # Start from a clean state: no traffic generators, no mroutes,
    # default router configuration and zeroed PIM interface counters.
    kill_iperf(tgen)
    clear_ip_mroute(tgen)
    reset_config_on_routers(tgen)
    clear_ip_pim_interface_traffic(tgen, topo)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    reset_config_on_routers(tgen)
    step("pre-configure BSM packet")
    step("Configure cisco-1 as BSR1 1.1.2.7")
    result = pre_config_to_bsm(
        tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
    )
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
    step("Configure cisco-1 as BSR1 10.2.1.1")
    result = pre_config_to_bsm(
        tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1"
    )
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
    step("configuring loopback address of b1 and b2 as BSR")
    intf_lo_addr_b1 = topo["routers"]["b1"]["links"]["lo"]["ipv4"]
    intf_lo_addr_b2 = topo["routers"]["b2"]["links"]["lo"]["ipv4"]

    raw_config = {
        "b1": {
            "raw_config": [
                "interface lo",
                "no ip address {}".format(intf_lo_addr_b1),
                "ip address {}".format(BSR1_ADDR),
                "ip pim",
            ]
        },
        "b2": {
            "raw_config": [
                "interface lo",
                "no ip address {}".format(intf_lo_addr_b2),
                "ip address {}".format(BSR2_ADDR),
                "ip pim",
            ]
        },
    }
    result = apply_raw_config(tgen, raw_config)
    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    GROUP_ADDRESS = "225.200.100.100"
    step("configuring static routes for both the BSR")

    next_hop_rp = topo["routers"]["f1"]["links"]["i1"]["ipv4"].split("/")[0]
    next_hop_lhr = topo["routers"]["i1"]["links"]["l1"]["ipv4"].split("/")[0]

    input_dict = {
        "f1": {
            "static_routes": [
                {"network": BSR1_ADDR, "next_hop": NEXT_HOP1},
                {"network": BSR2_ADDR, "next_hop": NEXT_HOP2},
            ]
        },
        "i1": {
            "static_routes": [
                {"network": BSR1_ADDR, "next_hop": next_hop_rp},
                {"network": BSR2_ADDR, "next_hop": next_hop_rp},
            ]
        },
        "l1": {
            "static_routes": [
                {"network": BSR1_ADDR, "next_hop": next_hop_lhr},
                {"network": BSR2_ADDR, "next_hop": next_hop_lhr},
            ]
        },
    }

    result = create_static_routes(tgen, input_dict)
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    # Use scapy to send pre-defined packet from senser to receiver
    step("Send BSR packet from b1 to FHR")
    result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet9")
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
    do_countdown(5)

    dut = "l1"
    step("Verify if b1 chosen as BSR in f1")
    result = verify_pim_bsr(tgen, topo, "f1", BSR_IP_1)
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    group = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet9"]["group"]
    step("Find the elected rp from bsrp-info in LHR l1")
    rp = find_rp_from_bsrp_info(tgen, dut, BSR_IP_1, group)
    # find_rp_from_bsrp_info() returns a dict; "is not {}" would always pass
    # (identity check against a fresh literal), so compare by value instead.
    assert rp != {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    step("Verify RP in LHR")
    result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group])
    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)

    step("Send BSR packet from b2 to FHR")
    result = scapy_send_bsr_raw_packet(tgen, topo, "b2", "f1", "packet3")
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    dut = "l1"
    step("Verify if b2 chosen as BSR in f1")
    result = verify_pim_bsr(tgen, topo, "f1", BSR_IP_2)
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    step("Find the elected rp from bsrp-info in LHR l1")
    rp = find_rp_from_bsrp_info(tgen, dut, BSR_IP_2, group)
    assert rp != {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    step("Verify RP in LHR")
    result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group])
    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)

    step("Shut higher prefer BSR2 link f1 to b2")

    # Use the interface name taken from the topology instead of a
    # hard-coded string, so topology renames don't silently break the shut.
    f1_b2_eth1 = topo["routers"]["f1"]["links"]["b2"]["interface"]
    shutdown_bringup_interface(tgen, "f1", f1_b2_eth1, False)

    step("clearing bsr to timeout old BSR")
    clear_bsrp_data(tgen, topo)

    step("Send BSR packet from b1 and b2 to FHR")
    result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet9")
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    result = scapy_send_bsr_raw_packet(tgen, topo, "b2", "f1", "packet3")
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    step("sleeping for 3 sec to learn new packet")
    do_countdown(3)
    step("verify BSR1 is become prefered RP")
    dut = "l1"

    step("Verify if b1 chosen as BSR in f1")
    result = verify_pim_bsr(tgen, topo, "f1", BSR_IP_1)
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    step("Find the elected rp from bsrp-info in LHR l1")
    rp = find_rp_from_bsrp_info(tgen, dut, BSR_IP_1, group)
    assert rp != {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    step("Verify RP in LHR")
    result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group])
    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)

    step("NoShut higher prefer BSR2 link f1 to b2")
    step("sleeping for 3 sec to learn new packet")
    do_countdown(3)
    f1_b2_eth1 = topo["routers"]["f1"]["links"]["b2"]["interface"]
    shutdown_bringup_interface(tgen, "f1", f1_b2_eth1, True)
    step("verify BSR2 is become prefered RP")
    dut = "l1"

    step("Send BSR packet from b1 and b2 to FHR")
    result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet9")
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    result = scapy_send_bsr_raw_packet(tgen, topo, "b2", "f1", "packet3")
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    step("Verify if b2 chosen as BSR in f1")
    result = verify_pim_bsr(tgen, topo, "f1", BSR_IP_2)
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    step("Find the elected rp from bsrp-info in LHR l1")
    rp = find_rp_from_bsrp_info(tgen, dut, BSR_IP_2, group)
    assert rp != {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    step("Verify RP in LHR")
    result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group])
    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)

    step("Clear BSM database before moving to next case")
    clear_bsrp_data(tgen, topo)

    write_test_footer(tc_name)
+
+
def test_BSR_CRP_with_blackhole_address_p1(request):
    """
    Verify BSR and RP updated correctly after configuring as black hole address

    Topology used:
      b1_____
             |
             |
      s1-----f1-----i1-----l1----r1
             |
       ______|
      b2

      b1 - BSR 1
      b2 - BSR 2
      s1 - Source
      f1 - FHR
      i1 - Intermediate Router (also RP)
      r1 - Receiver
    """

    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)

    # Start from a clean state before this testcase runs.
    kill_iperf(tgen)
    clear_ip_mroute(tgen)
    reset_config_on_routers(tgen)
    clear_ip_pim_interface_traffic(tgen, topo)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    reset_config_on_routers(tgen)
    step("pre-configure BSM packet")
    step("Configure cisco-1 as BSR1 1.1.2.7")
    result = pre_config_to_bsm(
        tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
    )
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    step("configuring loopback address of b1 and b2 as BSR")
    intf_lo_addr_b1 = topo["routers"]["b1"]["links"]["lo"]["ipv4"]

    raw_config = {
        "b1": {
            "raw_config": [
                "interface lo",
                "no ip address {}".format(intf_lo_addr_b1),
                "ip address {}".format(BSR1_ADDR),
                "ip pim",
            ]
        }
    }
    result = apply_raw_config(tgen, raw_config)
    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    GROUP_ADDRESS = "225.200.100.100"
    step("configuring static routes for both the BSR")

    next_hop_rp = topo["routers"]["f1"]["links"]["i1"]["ipv4"].split("/")[0]
    next_hop_lhr = topo["routers"]["i1"]["links"]["l1"]["ipv4"].split("/")[0]

    input_dict = {
        "f1": {"static_routes": [{"network": BSR1_ADDR, "next_hop": NEXT_HOP1}]},
        "i1": {"static_routes": [{"network": BSR1_ADDR, "next_hop": next_hop_rp}]},
        "l1": {"static_routes": [{"network": BSR1_ADDR, "next_hop": next_hop_lhr}]},
    }

    result = create_static_routes(tgen, input_dict)
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    # Use scapy to send pre-defined packet from senser to receiver

    group = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet9"]["group"]
    CRP = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet9"]["candidate_rp"]
    step("waiting for BSR to timeout before configuring blackhole route")
    clear_bsrp_data(tgen, topo)

    step("Configure black-hole address for BSR and candidate RP")
    input_dict = {
        "f1": {
            "static_routes": [{"network": [BSR1_ADDR, CRP], "next_hop": "blackhole"}]
        }
    }

    result = create_static_routes(tgen, input_dict)
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    intf_f1_i1 = topo["routers"]["f1"]["links"]["i1"]["interface"]
    step("Verify bsm transit count is not increamented" "show ip pim interface traffic")
    state_dict = {"f1": {intf_f1_i1: ["bsmTx"]}}

    state_before = verify_pim_interface_traffic(tgen, state_dict)
    # The message must be a single (implicitly concatenated) literal;
    # previously the second literal + .format() was a separate no-op
    # statement, so the assert message was never formatted.
    assert isinstance(state_before, dict), (
        "Testcase{} : Failed \n state_before is not dictionary \n "
        "Error: {}".format(tc_name, result)
    )

    step("Sending BSR after Configure black hole address for BSR and candidate RP")
    step("Send BSR packet from b1 to FHR")
    result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet9")
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    dut = "l1"
    step("Find the elected rp from bsrp-info in LHR l1")
    rp = find_rp_from_bsrp_info(tgen, dut, BSR_IP_1, group)
    # Compare by value: "is not {}" is an identity check against a fresh
    # dict literal and is always True.
    assert rp != {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    step("Verify if b1 chosen as BSR in l1")
    result = verify_pim_bsr(tgen, topo, "l1", BSR_IP_1, expected=False)
    assert result is not True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    state_after = verify_pim_interface_traffic(tgen, state_dict)
    assert isinstance(state_after, dict), (
        "Testcase{} : Failed \n state_before is not dictionary \n "
        "Error: {}".format(tc_name, result)
    )

    result = verify_state_incremented(state_before, state_after)
    assert result is not True, "Testcase{} : Failed Error: {}".format(tc_name, result)

    step("Remove black-hole address for BSR and candidate RP")
    input_dict = {
        "f1": {
            "static_routes": [
                {"network": [BSR1_ADDR, CRP], "next_hop": "blackhole", "delete": True}
            ]
        }
    }

    result = create_static_routes(tgen, input_dict)
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    step("Sending BSR after removing black-hole address for BSR and candidate RP")
    step("Send BSR packet from b1 to FHR")
    result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet9")
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    step("Verify if b1 chosen as BSR in f1")
    result = verify_pim_bsr(tgen, topo, "f1", BSR_IP_1)
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    dut = "l1"
    group = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet9"]["group"]
    step("Find the elected rp from bsrp-info in LHR l1")
    rp = find_rp_from_bsrp_info(tgen, dut, BSR_IP_1, group)
    assert rp != {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    step("Verify RP in LHR l1")
    result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group])
    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)

    step("clear BSM database before moving to next case")
    clear_bsrp_data(tgen, topo)

    write_test_footer(tc_name)
+
+
def test_new_router_fwd_p0(request):
    """
    1. Verify when new router added to the topology, FRR node will send
    unicast BSM to new router
    2. Verify if no forwarding bit is set , FRR is not forwarding the
    BSM to other PIM nbrs
    3. Verify multicast BSM is sent to new router when unicast BSM is disabled

    Topology used:
      b1_____
             |
             |
      s1-----f1-----i1-----l1----r1
             |
       ______|
      b2

      b1 - BSR 1
      b2 - BSR 2
      s1 - Source
      f1 - FHR
      i1 - Intermediate Router (also RP)
      r1 - Receiver

    """

    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)

    # Clean slate: stop traffic, flush mroutes, restore default config,
    # and zero the PIM interface counters.
    kill_iperf(tgen)
    clear_ip_mroute(tgen)
    reset_config_on_routers(tgen)
    clear_ip_pim_interface_traffic(tgen, topo)

    # Skip when a previous testcase already left a router broken.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    reset_config_on_routers(tgen)

    # Pre-build the BSM environment for both candidate BSRs.
    for bsr_node in ("b1", "b2"):
        res = pre_config_to_bsm(
            tgen, topo, tc_name, bsr_node, "s1", "r1", "f1", "i1", "l1", "packet1"
        )
        assert res is True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    GROUP_ADDRESS = "225.1.1.1"

    # Inject the pre-defined BSM from b1 towards the FHR via scapy.
    res = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1")
    assert res is True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    bsr_addr = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"]
    bsr_addr = bsr_addr.split("/")[0]
    time.sleep(1)

    res = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
    assert res is True, "Testcase {}:Failed \n Error: {}".format(tc_name, res)

    # The BSM should have elected b1 on both the FHR and the intermediate.
    step("Verify if b1 chosen as BSR in f1")
    res = verify_pim_bsr(tgen, topo, "f1", bsr_addr)
    assert res is True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    step("Verify if b1 chosen as BSR in i1")
    res = verify_pim_bsr(tgen, topo, "i1", bsr_addr)
    assert res is True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    # Expected (*, G) state on the LHR.
    in_intf = "l1-i1-eth0"
    any_src = "*"
    out_intf = "l1-r1-eth1"

    step("Verify mroute populated on l1")
    res = verify_ip_mroutes(tgen, "l1", any_src, GROUP_ADDRESS, in_intf, out_intf)
    assert res is True, "Testcase {} :Failed \n Error: {}".format(tc_name, res)

    step("Reloading i1 and l1. Stop both. bring up l1 and then i1")
    for node in ("i1", "l1"):
        stop_router(tgen, node)
        start_router(tgen, node)

    # i1 should re-learn the BSR via unicast BSM even without a new
    # multicast BSM from b1.
    step("Verify BSR in i1 after restart while no new bsm sent from b1")
    res = verify_pim_bsr(tgen, topo, "i1", bsr_addr)
    assert res is True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    # The no-forward BSM must not be propagated on to l1.
    step("Verify no BSR in l1 as i1 would not forward the no-forward bsm")
    res = verify_pim_bsr(tgen, topo, "l1", bsr_addr, expected=False)
    assert res is not True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    step("unconfigure unicast bsm on f1-i1-eth2, will forward with only mcast")
    enable_disable_pim_unicast_bsm(tgen, "f1", "f1-i1-eth2", enable=False)

    step("Reboot i1 to check if still bsm received with multicast address")
    stop_router(tgen, "i1")
    start_router(tgen, "i1")

    res = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
    assert res is True, "Testcase {}:Failed \n Error: {}".format(tc_name, res)

    step("Verify again if BSR is installed from bsm forwarded by f1")
    res = verify_pim_bsr(tgen, topo, "i1", bsr_addr)
    assert res is True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    step("Send another BSM packet from b1 which will reach l1(LHR)")
    res = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet2")
    assert res is True, "Testcase {} :Failed \n Error {}".format(tc_name, res)
    do_countdown(5)

    step("Verify mroute again on l1 (lhr)")
    res = verify_ip_mroutes(tgen, "l1", any_src, GROUP_ADDRESS, in_intf, out_intf)
    assert res is True, "Testcase {} :Failed \n Error: {}".format(tc_name, res)

    step("clear BSM database before moving to next case")
    clear_bsrp_data(tgen, topo)

    write_test_footer(tc_name)
+
+
def test_int_bsm_config_p1(request):
    """
    1. Verfiy BSM arrived on non bsm capable interface is dropped and
    not processed
    2. Verify group to RP info updated correctly in FRR node, after shut and
    no-shut of BSM enable interfaces

    Topology used:
      b1_____
             |
             |
      s1-----f1-----i1-----l1----r1
             |
       ______|
      b2

      b1 - BSR 1
      b2 - BSR 2
      s1 - Source
      f1 - FHR
      i1 - Intermediate Router (also RP)
      r1 - Receiver

    """

    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)

    # Bring the topology back to a known-clean baseline.
    kill_iperf(tgen)
    clear_ip_mroute(tgen)
    reset_config_on_routers(tgen)
    clear_ip_pim_interface_traffic(tgen, topo)

    # Skip when an earlier testcase already reported a router failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    reset_config_on_routers(tgen)

    # Pre-build the BSM environment for both candidate BSRs.
    for bsr_node in ("b1", "b2"):
        res = pre_config_to_bsm(
            tgen, topo, tc_name, bsr_node, "s1", "r1", "f1", "i1", "l1", "packet1"
        )
        assert res is True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    GROUP_ADDRESS = "225.1.1.1"

    bsr_addr = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"]
    bsr_addr = bsr_addr.split("/")[0]
    time.sleep(1)

    res = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
    assert res is True, "Testcase {}:Failed \n Error: {}".format(tc_name, res)

    # Inject the pre-defined BSM from b1 via scapy.
    step("Send BSM packet from b1")
    res = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1")
    assert res is True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    step("Verify if b1 is chosen as BSR in i1")
    res = verify_pim_bsr(tgen, topo, "i1", bsr_addr)
    assert res is True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    # Expected (*, G) state on i1 (it is the RP, so iif is the loopback).
    step("check if mroute installed in i1")
    in_intf = "lo"
    any_src = "*"
    out_intf = "i1-l1-eth1"

    res = verify_ip_mroutes(tgen, "i1", any_src, GROUP_ADDRESS, in_intf, out_intf)
    assert res is True, "Testcase {}:Failed \n Error: {}".format(tc_name, res)

    # Flush BSR/RP state so the learnt mapping ages out.
    step("wait till bsm rp age out")
    clear_bsrp_data(tgen, topo)

    step("check if mroute uninstalled because of rp age out in i1")
    res = verify_ip_mroutes(
        tgen, "i1", any_src, GROUP_ADDRESS, in_intf, out_intf, expected=False
    )
    assert res is not True, "Testcase {}:Failed \n Error: {}".format(tc_name, res)

    # With BSM processing disabled on the receiving interface the packet
    # must be dropped by f1's downstream neighbour.
    step("unconfigure bsm processing on f1 in f1-i1-eth2, will drop bsm")
    res = enable_disable_pim_bsm(tgen, "f1", "f1-i1-eth2", enable=False)
    assert res is True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    step("Send BSM packet from b1")
    res = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1")
    assert res is True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    step("Verify if b1 chosen as BSR in f1")
    res = verify_pim_bsr(tgen, topo, "f1", bsr_addr)
    assert res is True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    step("Verify if b1 is not chosen as BSR in i1")
    res = verify_pim_bsr(tgen, topo, "i1", bsr_addr, expected=False)
    assert res is not True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    step("check if mroute still not installed because of rp not available")
    res = verify_ip_mroutes(
        tgen, "i1", any_src, GROUP_ADDRESS, in_intf, out_intf, expected=False
    )
    assert res is not True, "Testcase {}:Failed \n Error: {}".format(tc_name, res)

    # Re-enable BSM processing and confirm state is re-learnt.
    step("configure bsm processing on f1 in f1-i1-eth2, will accept bsm")
    res = enable_disable_pim_bsm(tgen, "f1", "f1-i1-eth2", enable=True)
    assert res is True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    step("Send BSM packet again from b1")
    res = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet2")
    assert res is True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    step("Verify again if BSR is installed from bsm forwarded by f1")
    res = verify_pim_bsr(tgen, topo, "i1", bsr_addr)
    assert res is True, "Testcase {} :Failed \n Error {}".format(tc_name, res)

    step("Verify ip mroute")
    res = verify_ip_mroutes(tgen, "i1", any_src, GROUP_ADDRESS, in_intf, out_intf)
    assert res is True, "Testcase {}:Failed \n Error: {}".format(tc_name, res)

    # Flap the BSM RPF interface on the LHR and confirm the mroute recovers.
    step("Shut/No shut the bsm rpf interface and check mroute on lhr(l1)")
    rpf_intf = "l1-i1-eth0"
    shutdown_bringup_interface(tgen, "l1", rpf_intf, False)
    shutdown_bringup_interface(tgen, "l1", rpf_intf, True)

    in_intf = "l1-i1-eth0"
    out_intf = "l1-r1-eth1"

    res = verify_ip_mroutes(tgen, "l1", any_src, GROUP_ADDRESS, in_intf, out_intf)
    assert res is True, "Testcase {}:Failed \n Error: {}".format(tc_name, res)

    step("clear BSM database before moving to next case")
    clear_bsrp_data(tgen, topo)

    write_test_footer(tc_name)
+
+
def test_static_rp_override_p1(request):
    """
    Verify static RP is preferred over BSR

    Topology used:
      b1_____
             |
             |
      s1-----f1-----i1-----l1----r1
             |
       ______|
      b2

      b1 - BSR 1
      b2 - BSR 2
      s1 - Source
      f1 - FHR
      i1 - Intermediate Router (also RP)
      r1 - Receiver

    """

    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)

    # Start from a clean state before this testcase runs.
    kill_iperf(tgen)
    clear_ip_mroute(tgen)
    reset_config_on_routers(tgen)
    clear_ip_pim_interface_traffic(tgen, topo)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    reset_config_on_routers(tgen)

    result = pre_config_to_bsm(
        tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
    )
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    result = pre_config_to_bsm(
        tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1"
    )
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    GROUP_ADDRESS = "225.1.1.1"
    # Use scapy to send pre-defined packet from senser to receiver
    result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1")
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
    time.sleep(1)

    result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)

    # Verify bsr state in FHR
    result = verify_pim_bsr(tgen, topo, "f1", bsr_ip)
    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    # Check igmp groups
    step("Verify IGMP groups in LHR")
    dut = "l1"
    intf = "l1-r1-eth1"
    result = verify_igmp_groups(tgen, dut, intf, GROUP_ADDRESS)
    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)

    group = "225.1.1.1/32"

    # Find the elected rp from bsrp-info
    step("Find the elected rp from bsrp-info in LHR l1")
    rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group)
    # find_rp_from_bsrp_info() returns a dict; "is not {}" is an identity
    # check against a fresh literal and always passes — compare by value.
    assert rp != {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    # Check RP detail in LHR
    step("Verify that BS RP in LHR l1")
    result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group])
    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)

    iif = "l1-i1-eth0"
    # Verify upstream rpf for 225.1.1.1 is chosen as rp1
    step("Verify upstream rpf for 225.1.1.1 is chosen as bsrp")
    result = verify_ip_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, rp[group])
    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)

    # Configure a static rp for the group 225.1.1.1/32
    step("Configure a static rp 33.33.33.33 for the group 225.1.1.1/32 in l1")
    input_dict = {
        "l1": {
            "pim": {
                "rp": [
                    {"rp_addr": "33.33.33.33", "group_addr_range": ["225.1.1.1/32"]}
                ]
            }
        }
    }
    result = create_pim_config(tgen, topo, input_dict)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # Verify that static rp is configured over bsrp
    static_rp = "33.33.33.33"
    step("Verify that Static RP in LHR in l1")
    result = verify_pim_grp_rp_source(tgen, topo, dut, group, "Static", static_rp)
    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)

    # Verify if upstream also reflects the static rp
    step("Verify upstream rpf for 225.1.1.1 is chosen as static in l1")
    result = verify_ip_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, static_rp)
    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)

    # delete static rp for the group 225.1.1.1/32
    step("Delete static rp 33.33.33.33 for the group 225.1.1.1/32 in l1")
    input_dict = {
        "l1": {
            "pim": {
                "rp": [
                    {
                        "rp_addr": "33.33.33.33",
                        "group_addr_range": ["225.1.1.1/32"],
                        "delete": True,
                    }
                ]
            }
        }
    }
    result = create_pim_config(tgen, topo, input_dict)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # Verify if bsrp is installed back for the group 225.1.1.1/32
    step("Verify that BS RP in installed in LHR")
    result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group])
    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)

    # Verify upstream rpf for 225.1.1.1 is chosen as bsrp
    step("Verify upstream rpf for 225.1.1.1 is chosen as bsrp in l1")
    result = verify_ip_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, rp[group])
    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)

    step("clear BSM database before moving to next case")
    clear_bsrp_data(tgen, topo)

    write_test_footer(tc_name)
+
+
+def test_bsmp_stress_add_del_restart_p2(request):
+ """
+ 1. Verify adding/deleting the group to rp mapping and RP priority
+ multiple times
+ 2. Verify RP and (*,G) detail after PIM process restart on FRR node
+
+ Topology used:
+ b1_____
+ |
+ |
+ s1-----f1-----i1-----l1----r1
+ |
+ ______|
+ b2
+
+ b1 - BSR 1
+ b2 - BSR 2
+ s1 - Source
+ f1 - FHR
+ i1 - Intermediate Router (also RP)
+ r1 - Receiver
+
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ reset_config_on_routers(tgen)
+
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ GROUP_ADDRESS = "225.1.1.1"
+
+ # Use scapy to send pre-defined packet from senser to receiver
+ step("Send BSR packet from b1 to FHR")
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
+ time.sleep(1)
+
+ result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify bsr state in FHR
+ step("Verify if b1 is chosen as bsr in f1")
+ result = verify_pim_bsr(tgen, topo, "f1", bsr_ip)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ dut = "l1"
+ group = "225.1.1.0/24"
+ # Find the elected rp from bsrp-info
+ step("Find the elected rp from bsrp-info in LHR l1")
+ rp1 = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group)
+ assert rp1 is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Check RP detail in LHR
+ step("Verify RP in LHR l1")
+ result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp1[group])
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Send BSR packet from b1 after deleting high prio rp for 225.1.1.0/24
+ step("Send BSM from b1 to FHR deleting high prio rp for 225.1.1.0/24")
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet6")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Find the elected rp from bsrp-info
+ step("Find the elected rp from bsrp-info in LHR l1")
+ rp2 = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group)
+ assert rp2 is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+ logger.info("RP old: %s RP2 new: %s", rp1[group], rp2[group])
+
+ # Verify is the rp is different now
+ assert rp1[group] != rp2[group], "Testcase {} :Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ rp_add1 = rp1[group]
+ rp_add2 = rp2[group]
+
+ # Verify if that rp is installed
+ step("Verify new RP in LHR installed")
+ result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp_add2)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ step("Change rp priority in the bsm and send multiple times")
+
+ for i in range(4):
+ # Send BSR pkt from b1 after putting back high prio rp for 225.1.1.0/24
+ step("Send BSM from b1 to FHR put back high prio rp for 225.1.1.0/24")
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Find the elected rp from bsrp-info
+ step("Find the elected rp from bsrp-info in LHR")
+ rp2 = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group)
+ assert rp2 is not {}, "Testcase {} :Failed \n Error : RP not Found".format(
+ tc_name
+ )
+
+ # Verify is the rp is different now
+ step("Verify now old RP is elected again")
+ assert (
+ rp_add1 == rp2[group]
+ ), "Testcase {} :Failed \n Error : rp expected {} rp received {}".format(
+ tc_name, rp_add1,
+ )
+
+ # Verify if that rp is installed
+ step("Verify old RP in LHR installed")
+ result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp_add1)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Send BSR packet from b1 after deleting high prio rp for 225.1.1.0/24
+ step("Send BSM from b1 to FHR deleting high prio rp for 225.1.1.0/24")
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet6")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Verify if that rp is installed
+ step("Verify new RP(rp2) in LHR installed")
+ result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp_add2)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Restart pimd
+ step("Restarting pimd in LHR")
+ kill_router_daemons(tgen, "l1", ["pimd"])
+ start_router_daemons(tgen, "l1", ["pimd"])
+ logger.info("Restarting done")
+
+ # Verify if that rp is installed
+ step("Verify old RP in LHR installed")
+ result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp_add2)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Send IGMP join to LHR
+ result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ do_countdown(5)
+
+    # Verify mroute created after pimd restart
+ step("VErify mroute created after pimd restart")
+ iif = "l1-i1-eth0"
+ src_addr = "*"
+ oil = "l1-r1-eth1"
+ result = verify_ip_mroutes(tgen, "l1", src_addr, GROUP_ADDRESS, iif, oil)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_BSM_timeout_p0(request):
+    """
+    Verify BSM timeout on FRR1
+    Verify RP state in FRR1 after Bootstrap timer expiry
+
+    Topology used:
+      b1_____
+             |
+             |
+      s1-----f1-----i1-----l1----r1
+             |
+       ______|
+      b2
+
+      b1 - BSR 1
+      b2 - BSR 2
+      s1 - Source
+      f1 - FHR
+      i1 - Intermediate Router (also RP)
+      r1 - Receiver
+
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Start from a clean state: stop traffic generators, flush mroutes and
+    # restore the base configuration before this case runs.
+    kill_iperf(tgen)
+    clear_ip_mroute(tgen)
+    reset_config_on_routers(tgen)
+    clear_ip_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    # Set up interfaces/static routes so BSM packets from both BSR
+    # candidates (b1 and b2) can reach the rest of the topology.
+    result = pre_config_to_bsm(
+        tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
+    )
+    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+    result = pre_config_to_bsm(
+        tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1"
+    )
+    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+    GROUP_ADDRESS = "225.1.1.1"
+
+    bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
+
+    # Use scapy to send pre-defined packet from sender to receiver
+    step("send BSR packet from b1")
+    result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1")
+    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+    # Send IGMP join for group 225.1.1.1 from receiver
+    result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+    # Verify bsr state in FHR
+    step("Verify bsr state in FHR f1")
+    result = verify_pim_bsr(tgen, topo, "f1", bsr_ip)
+    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+    # Verify ip mroute in LHR
+    step(" Verify ip mroute in LHR l1")
+    dut = "l1"
+    iif = "l1-i1-eth0"
+    src_addr = "*"
+    oil = "l1-r1-eth1"
+    result = verify_ip_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil)
+    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+    # Verify join state and join timer
+    step("Verify join state and join timer in lhr l1")
+    result = verify_join_state_and_timer(tgen, dut, iif, src_addr, GROUP_ADDRESS)
+    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+    # Verify upstream IIF interface
+    step("Verify upstream IIF interface in LHR l1")
+    result = verify_upstream_iif(tgen, dut, iif, src_addr, GROUP_ADDRESS)
+    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+    # Verify RP mapping was learned from the BSR
+    dut = "l1"
+    group = "225.1.1.1/32"
+    step("Verify RP mapping in LHR l1")
+    rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group)
+    assert rp != {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+    # NOTE(review): despite the log message, no 130-second wait happens here;
+    # clearing the BSR data below simulates the Bootstrap timer expiry —
+    # confirm this shortcut is intended.
+    logger.info("Waiting for 130 secs to check BSR timeout")
+    clear_bsrp_data(tgen, topo)
+
+    # Verify if bsr has aged out
+    step("Verify if bsr has aged out in f1")
+    no_bsr_ip = "0.0.0.0"
+    result = verify_pim_bsr(tgen, topo, "f1", no_bsr_ip)
+    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+    # The BSR-learned group-to-RP mapping must be gone as well.
+    result = verify_pim_grp_rp_source(
+        tgen, topo, "f1", group, rp_source="BSR", expected=False
+    )
+
+    assert result is not True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+    # Verify RP mapping removed after hold timer expires
+    group = "225.1.1.1/32"
+    step("Verify RP mapping removed after hold timer expires in l1")
+    rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group)
+    assert rp == {}, "Testcase {} :Failed \n Error : RP found when not expected".format(
+        tc_name
+    )
+
+    # Verify iif is unknown after RP timeout
+    step("Verify iif is unknown after RP timeout in l1")
+    iif = "Unknown"
+    result = verify_upstream_iif(
+        tgen, dut, iif, src_addr, GROUP_ADDRESS, joinState="NotJoined"
+    )
+    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+    # Join state/timer must no longer exist on the old incoming interface
+    step("Verify join state and join timer in l1")
+    iif = "l1-i1-eth0"
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, src_addr, GROUP_ADDRESS, expected=False
+    )
+    assert result is not True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+    # Verify ip mroute is not installed
+    step("Verify mroute not installed in l1")
+    result = verify_ip_mroutes(
+        tgen, dut, src_addr, GROUP_ADDRESS, iif, oil, expected=False
+    )
+    assert result is not True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+    step("clear BSM database before moving to next case")
+    clear_bsrp_data(tgen, topo)
+
+    write_test_footer(tc_name)
+
+
+def test_iif_join_state_p0(request):
+ """
+ 1. Verify upstream interfaces(IIF) and join state are updated properly
+ after BSM received for FRR
+ 2. Verify IIF and OIL in "show ip pim state" updated properly after
+ BSM received
+
+ Topology used:
+ b1_____
+ |
+ |
+ s1-----f1-----i1-----l1----r1
+ |
+ ______|
+ b2
+
+ b1 - BSR 1
+ b2 - BSR 2
+ s1 - Source
+ f1 - FHR
+ i1 - Intermediate Router (also RP)
+ r1 - Receiver
+
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ reset_config_on_routers(tgen)
+
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ GROUP_ADDRESS = "225.1.1.1"
+
+ # Use scapy to send pre-defined packet from senser to receiver
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
+ time.sleep(1)
+
+ result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify bsr state in FHR
+ result = verify_pim_bsr(tgen, topo, "f1", bsr_ip)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Check igmp groups
+ step("Verify IGMP groups in LHR l1")
+ dut = "l1"
+ intf = "l1-r1-eth1"
+ result = verify_igmp_groups(tgen, dut, intf, GROUP_ADDRESS)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ group = "225.1.1.1/32"
+
+ # Find the elected rp from bsrp-info
+ step("Find the elected rp from bsrp-info in LHR l1")
+ rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group)
+ assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Check RP detail in LHR
+ step("Verify RP in LHR l1")
+ result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group])
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify join state and join timer
+ step("Verify join state and join timer l1")
+ iif = "l1-i1-eth0"
+ src_addr = "*"
+ result = verify_join_state_and_timer(tgen, dut, iif, src_addr, GROUP_ADDRESS)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify upstream IIF interface
+ step("Verify upstream IIF interface l1")
+ result = verify_upstream_iif(tgen, dut, iif, src_addr, GROUP_ADDRESS)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify IIF/OIL in pim state
+ oil = "l1-r1-eth1"
+ result = verify_pim_state(tgen, dut, iif, oil, GROUP_ADDRESS)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify ip mroute
+ src_addr = "*"
+ step("Verify ip mroute in l1")
+ result = verify_ip_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Make RP unreachanble in LHR
+ step("Make RP unreachanble in LHR l1")
+ rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group)
+ assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ next_hop_lhr = topo["routers"]["i1"]["links"]["l1"]["ipv4"].split("/")[0]
+
+ rp_ip = rp[group] + "/32"
+ input_dict = {
+ "l1": {
+ "static_routes": [
+ {"network": rp_ip, "next_hop": next_hop_lhr, "delete": True}
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Check RP unreachable
+ step("Check RP unreachability")
+ iif = "Unknown"
+ result = verify_upstream_iif(
+ tgen, dut, iif, src_addr, GROUP_ADDRESS, joinState="NotJoined"
+ )
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify that it is not installed
+ step("Verify that it is not installed")
+ iif = "<iif?>"
+ result = verify_pim_state(tgen, dut, iif, oil, GROUP_ADDRESS, installed_fl=0)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify mroute not installed
+ step("Verify mroute not installed")
+ result = verify_ip_mroutes(
+ tgen, dut, src_addr, GROUP_ADDRESS, iif, oil, expected=False
+ )
+ assert result is not True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Add back route for RP to make it reachable
+ step("Add back route for RP to make it reachable")
+ input_dict = {
+ "l1": {"static_routes": [{"network": rp_ip, "next_hop": next_hop_lhr,}]}
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Verify that (*,G) installed in mroute again
+ iif = "l1-i1-eth0"
+ result = verify_ip_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ step("clear BSM database before moving to next case")
+ clear_bsrp_data(tgen, topo)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/multicast-pim-bsm-topo2/mcast_pim_bsmp_02.json b/tests/topotests/multicast-pim-bsm-topo2/mcast_pim_bsmp_02.json
new file mode 100644
index 0000000000..14cb0bee1d
--- /dev/null
+++ b/tests/topotests/multicast-pim-bsm-topo2/mcast_pim_bsmp_02.json
@@ -0,0 +1,238 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32},
+ "routers": {
+ "b1": {
+ "links": {
+ "f1": {"ipv4": "auto", "pim": "enable"},
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}
+ },
+ "bsm": {
+ "bsr_packets": {
+ "packet1" : {
+ "data": "01005e00000d005056961165080045c000aa5af500000167372a46000001e000000d2400f5ce165b000001004600000101000018e1010100080800000100090a090a0096650001000909090a0096660001000708090a00966700010007070907009668000100070702070096690001000705020700966a0001000702020700966b0001000202020200966c0001000020e1010101010100000100050606050096000001000020e20101010101000001000909090900960000",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "70.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32",
+ "9.9.9.10/32", "7.7.9.7/32",
+ "7.7.2.7/32", "7.5.2.7/32",
+ "7.2.2.7/32", "2.2.2.2/32"],
+ "225.1.1.1/32": ["5.6.6.5/32"],
+ "225.200.100.100/32": ["210.210.210.210/32"],
+ "226.1.1.1/32": ["9.9.9.9/32"]
+
+ },
+ "Desc" : "Packet with 3 group range - rp prio different"
+ },
+ "packet2" : {
+ "data": "01005e00000d005056961165080045c0009420f400000167714146000001e000000d24000b3b164a000001004600000101000018e1010100080800000100020202020096640001000909090a0096640001000707020700966400010007020207009664000100070709070096640001000708090a00966400010007050207009664000100090a090a0096640001000020e20101010101000001000909090900000000",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "70.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32",
+ "9.9.9.10/32", "7.7.9.7/32",
+ "7.7.2.7/32", "7.5.2.7/32",
+ "7.2.2.7/32", "2.2.2.2/32"]
+ },
+ "Desc" : "Packet 1 with hold time 0 for 226.1.1.1/32"
+ },
+ "packet3" : {
+ "data": "01005e00000d005056961165080045c000944d0000000167453546000001e000000d2400e52b17c3000001004600000101000018e1010100080800000100090a090a0096650001000909090a0096660001000708090a00966700010007070907009668000100070702070096690001000705020700966a0001000702020700966b0001000202020200966c0001000020e20101010101000001000909090900960000",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "70.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32",
+ "9.9.9.10/32", "7.7.9.7/32",
+ "7.7.2.7/32", "7.5.2.7/32",
+ "7.2.2.7/32", "2.2.2.2/32"],
+ "226.1.1.1/32": ["9.9.9.9/32"]
+ },
+ "Desc" : "BSR Prio - TC 4"
+ },
+ "packet4" : {
+ "data": "01005e00000d005056961165080045c000aa3d1c00000167550346000001e000000d24000d671c52000001004600000101000018e1010100080800000100020202020096640001000909090a0096640001000707020700966400010007020207009664000100070709070096640001000708090a00966400010007050207009664000100090a090a0096640001000020e1010101010100000100090909090000000001000020e20101010101000001000909090900960000",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "70.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32",
+ "9.9.9.10/32", "7.7.9.7/32",
+ "7.7.2.7/32", "7.5.2.7/32",
+ "7.2.2.7/32", "2.2.2.2/32"],
+ "225.1.1.1/32": ["9.9.9.9/32"],
+ "226.1.1.1/32": ["9.9.9.9/32"]
+ },
+ "Desc" : "TC - 5"
+ },
+ "packet5" : {
+ "data": "01005e00000d005056961165080045c000aa3d1c00000167550346000001e000000d24000d671c52000001004600000101000018e1010100080800000100020202020096640001000909090a0096640001000707020700966400010007020207009664000100070709070096640001000708090a00966400010007050207009664000100090a090a0096640001000020e1010101010100000100090909090000000001000020e20101010101000001000909090900960000",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "70.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32",
+ "9.9.9.10/32", "7.7.9.7/32",
+ "7.7.2.7/32", "7.5.2.7/32",
+ "7.2.2.7/32", "2.2.2.2/32"],
+ "226.1.1.1/32": ["9.9.9.9/32"]
+ },
+ "Desc" : "TC - 5, 225.1.1.1 with hold time 0"
+ },
+ "packet6" : {
+ "data": "01005e00000d005056961165080045c0008a795e0000016718e146000001e000000d24006cc509d5000001004600000101000018e10101000707000001000909090a0096660001000708090a00966700010007070907009668000100070702070096690001000705020700966a0001000702020700966b0001000202020200966c0001000020e20101010101000001000909090900960000",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "70.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["7.8.9.10/32",
+ "9.9.9.10/32", "7.7.9.7/32",
+ "7.7.2.7/32", "7.5.2.7/32",
+ "7.2.2.7/32", "2.2.2.2/32"],
+ "226.1.1.1/32": ["9.9.9.9/32"]
+ },
+ "Desc" : "TC - 6,High prio rp removed on 225.1.1.0/24"
+ },
+ "packet7" : {
+ "data": "01005e00000d005056961165080045c0007e6ebb00000167239046000001e000000d2400090810b3000001004600000101000018e1010100080800000100020202020096640001000909090a0096640001000707020700966400010007020207009664000100070709070096640001000708090a00966400010007050207009664000100090a090a00966400",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "70.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32",
+ "9.9.9.10/32", "7.7.9.7/32",
+ "7.7.2.7/32", "7.5.2.7/32",
+ "7.2.2.7/32", "2.2.2.2/32"]
+ },
+ "Desc" : "TC - 8, rps with same priority"
+ },
+
+ "packet8" : {
+ "data": "01005e00000d005056b76687080045c000383cdf0000016755b246000001e000000d24008ad51a9f000001004600000101000020e1c86464010100000100d2d2d2d200960000",
+ "group": "225.200.100.100/32",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "70.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.200.100.100/32": ["210.210.210.210/32"]
+ },
+ "Desc" : "TC - 30, grp add with all octet"
+ },
+
+ "packet9" : {
+ "data": "01005e00000d005056b76687080045c000387b8600000167170b46000001e000000d2400c6282245000001000101020701000020e1c86464010100000100d2d2d2d200960000",
+ "group": "225.200.100.100/32",
+ "candidate_rp": "210.210.210.210/32",
+ "src_ip": "70.0.0.1/24",
+ "dest_ip": "70.0.0.2/24",
+ "bsr": "1.1.2.7/32",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.200.100.100/32": ["210.210.210.210/32"]
+ },
+ "Desc" : "TC -29, BSM with preferred ip"
+ }
+
+ }
+ }
+ },
+
+ "b2": {
+ "links": {
+ "f1": {"ipv4": "auto", "pim": "enable"},
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}
+ },
+ "bsm": {
+ "bsr_packets": {
+ "packet1" : {
+ "data": "01005e00000d005056b70489080045c0003865db0000016731b641000001e000000d2400659c0c6f000001004100000101000018e10101000101000001002121212100960000",
+ "src_ip": "65.0.0.1/24",
+ "dest_ip": "65.0.0.2/24",
+ "bsr": "65.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["33.33.33.33/32"],
+ "225.200.100.100/32": ["210.210.210.210/32"]
+ }
+ },
+ "packet2" : {
+ "data": "01005e00000d005056b70489080045c00038663000000167316141000001e000000d24006dce0433000a01004100000101000018e10101000101000001002121212100960000",
+ "src_ip": "65.0.0.1/24",
+ "dest_ip": "65.0.0.2/24",
+ "bsr": "65.0.0.1/24",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.1.1.0/24": ["33.33.33.33/32"]
+ }
+ },
+
+ "packet3" : {
+ "data": "01005e00000d005056b76687080045c00038f5c800000167a1c841000001e000000d2400c6621a10000001000a02010101000020e1c86464010100000100d2d2d2d200960000",
+ "src_ip": "65.0.0.1/24",
+ "dest_ip": "65.0.0.2/24",
+ "bsr": "10.2.1.1/32",
+ "pkt_dst": "224.0.0.13",
+ "rp_mapping" : {
+ "225.200.100.100/32": ["210.210.210.210/32"]
+ }
+ }
+
+ }
+ }
+ },
+
+ "f1": {
+ "links": {
+ "b1": {"ipv4": "auto", "pim": "enable"},
+ "b2": {"ipv4": "auto", "pim": "enable"},
+ "i1": {"ipv4": "auto", "pim": "enable"},
+ "s1": {"ipv4": "auto", "pim": "enable"}
+ }
+ },
+ "i1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "f1": {"ipv4": "auto", "pim": "enable"},
+ "l1": {"ipv4": "auto", "pim": "enable"}
+ }
+ },
+ "l1": {
+ "links": {
+ "i1": {"ipv4": "auto", "pim": "enable"},
+ "r1": {"ipv4": "auto", "pim": "enable"}
+ },
+ "igmp": {
+ "interfaces": {
+ "l1-r1-eth1" :{
+ "igmp":{
+ "version": "2"
+ }
+ }
+ }
+ }
+ },
+ "s1": {
+ "links": {
+ "f1": {"ipv4": "auto", "pim": "enable"}
+ }
+ },
+ "r1": {
+ "links": {
+ "l1": {"ipv4": "auto", "pim": "disable"}
+ }
+ }
+ }
+}
diff --git a/tests/topotests/multicast-pim-bsm-topo2/test_mcast_pim_bsmp_02.py b/tests/topotests/multicast-pim-bsm-topo2/test_mcast_pim_bsmp_02.py
new file mode 100644
index 0000000000..459afb5a02
--- /dev/null
+++ b/tests/topotests/multicast-pim-bsm-topo2/test_mcast_pim_bsmp_02.py
@@ -0,0 +1,1115 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2020 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test PIM BSM processing basic functionality:
+
+Test steps
+- Create topology (setup module)
+- Bring up topology
+
+Tests covered in this suite
+1. Verify (*,G) mroute detail on FRR router after BSM rp installed
+2. Verify group to RP updated correctly on FRR router, when BSR advertising
+ the overlapping group address
+3. Verify group to RP info is updated correctly, when BSR advertising the
+ same RP with different priority
+4. Verify group to RP mapping in FRR node when 2 BSR are present in the network
+ and both are having same BSR priority
+5. Verify RP is selected based on hash function, when BSR advertising the group
+ to RP mapping with same priority
+6. Verify fragmentation of bootstrap message
+7. Verify when candidate RP advertised with 32 mask length
+   and contains all the octets
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ step,
+ addKernelRoute,
+ create_static_routes,
+ iperfSendIGMPJoin,
+ stop_router,
+ start_router,
+ shutdown_bringup_interface,
+ kill_router_daemons,
+ start_router_daemons,
+ reset_config_on_routers,
+ do_countdown,
+ apply_raw_config,
+ kill_iperf,
+ run_frr_cmd,
+ required_linux_kernel_version,
+ topo_daemons,
+)
+
+from lib.pim import (
+ create_pim_config,
+ add_rp_interfaces_and_pim_config,
+ reconfig_interfaces,
+ scapy_send_bsr_raw_packet,
+ find_rp_from_bsrp_info,
+ verify_pim_grp_rp_source,
+ verify_pim_bsr,
+ verify_ip_mroutes,
+ verify_join_state_and_timer,
+ verify_pim_state,
+ verify_upstream_iif,
+ verify_igmp_groups,
+ verify_ip_pim_upstream_rpf,
+ enable_disable_pim_unicast_bsm,
+ enable_disable_pim_bsm,
+ clear_ip_mroute,
+ clear_ip_pim_interface_traffic,
+ verify_pim_interface_traffic,
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/mcast_pim_bsmp_02.json".format(CWD)
+try:
+    with open(jsonFile, "r") as topoJson:
+        topo = json.load(topoJson)
+except IOError:
+    assert False, "Could not read file {}".format(jsonFile)
+
+TOPOLOGY = """
+
+      b1_____
+             |
+             |
+      s1-----f1-----i1-----l1----r1
+             |
+       ______|
+      b2
+
+      b1 - BSR 1
+      b2 - BSR 2
+      s1 - Source
+      f1 - FHR
+      i1 - Intermediate Router (also RP)
+      r1 - Receiver
+
+"""
+# Global variables
+NEXT_HOP1 = "70.0.0.1"  # matches b1's BSR packet src_ip in the topo JSON
+NEXT_HOP2 = "65.0.0.1"  # matches b2's BSR packet src_ip in the topo JSON
+BSR_IP_1 = "1.1.2.7"  # presumably b1's preferred BSR address (packet9) — confirm
+BSR_IP_2 = "10.2.1.1"  # presumably b2's preferred BSR address (packet3) — confirm
+BSR1_ADDR = "1.1.2.7/32"
+BSR2_ADDR = "10.2.1.1/32"
+
+
+class CreateTopo(Topo):
+    """
+    Test BasicTopo - topology 1
+
+    * `Topo`: Topology object
+    """
+
+    def build(self, *_args, **_opts):
+        """Build function: construct the topology from the suite's JSON spec."""
+        tgen = get_topogen(self)
+
+        # Building topology from json file
+        build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment
+
+    * `mod`: module name
+    """
+
+    # Required linux kernel version for this suite to run.
+    result = required_linux_kernel_version("4.15")
+    if result is not True:
+        pytest.skip("Kernel requirements are not met")
+
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: {}".format(testsuite_run_time))
+    logger.info("=" * 40)
+    logger.info("Master Topology: \n {}".format(TOPOLOGY))
+
+    logger.info("Running setup_module to create topology")
+
+    # This function initiates the topology build with Topogen...
+    tgen = Topogen(CreateTopo, mod.__name__)
+    # ... and here it calls Mininet initialization functions.
+
+    # Get the list of daemons that need to be started for this suite.
+    daemons = topo_daemons(tgen, topo)
+
+    # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+    start_topology(tgen, daemons)
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    # Creating configuration from JSON
+    build_config_from_json(tgen, topo)
+
+    logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop toplogy and Remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Local APIs
+#
+#####################################################
+
+
+def clear_bsrp_data(tgen, topo):
+
+ """
+ clear bsm databas after test"
+ Parameters
+ ----------
+ * `tgen`: topogen object
+
+ Usage
+ -----
+ result = clear_bsrp_data(tgen, topo)
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ for dut in tgen.routers():
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: clear_bsrp_data")
+
+ run_frr_cmd(rnode, "clear ip pim bsr-data")
+
+ return True
+
+
+def pre_config_to_bsm(tgen, topo, tc_name, bsr, sender, receiver, fhr, rp, lhr, packet):
+    """
+    API to do required configuration to send and receive BSR packet
+
+    Parameters
+    ----------
+    * `tgen`: topogen object
+    * `topo`: topology dict loaded from the suite's JSON file
+    * `tc_name`: calling testcase name (used in assert messages)
+    * `bsr`: router name acting as BSR (e.g. "b1")
+    * `sender`: traffic source router name
+    * `receiver`: traffic receiver router name
+    * `fhr`: first-hop router name
+    * `rp`: rendezvous-point router name
+    * `lhr`: last-hop router name
+    * `packet`: key of the pre-built BSR packet in the topo JSON
+
+    Returns True; asserts on any intermediate failure.
+    """
+
+    # Re-configure interfaces as per BSR packet
+    result = reconfig_interfaces(tgen, topo, bsr, fhr, packet)
+    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+    # Create static routes towards the BSR address, but only when the packet
+    # definition carries an explicit "bsr" entry.
+    if "bsr" in topo["routers"][bsr]["bsm"]["bsr_packets"][packet]:
+        bsr_route = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["bsr"]
+        next_hop = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["src_ip"].split(
+            "/"
+        )[0]
+        next_hop_rp = topo["routers"][fhr]["links"][rp]["ipv4"].split("/")[0]
+        next_hop_lhr = topo["routers"][rp]["links"][lhr]["ipv4"].split("/")[0]
+
+        # Add static routes
+        input_dict = {
+            fhr: {"static_routes": [{"network": bsr_route, "next_hop": next_hop}]},
+            rp: {"static_routes": [{"network": bsr_route, "next_hop": next_hop_rp}]},
+            lhr: {"static_routes": [{"network": bsr_route, "next_hop": next_hop_lhr}]},
+        }
+
+        result = create_static_routes(tgen, input_dict)
+        assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+    # Add kernel route for source (towards the packet's destination group)
+    group = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["pkt_dst"]
+    bsr_interface = topo["routers"][bsr]["links"][fhr]["interface"]
+    result = addKernelRoute(tgen, bsr, bsr_interface, group)
+    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+    # RP Mapping advertised by this packet: {group_prefix: [rp_addr, ...]}
+    rp_mapping = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["rp_mapping"]
+
+    # Add interfaces in RP for all the RPs
+    result = add_rp_interfaces_and_pim_config(tgen, topo, "lo", rp, rp_mapping)
+    assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+    # Add kernel routes to sender and receiver
+    for group, rp_list in rp_mapping.items():
+        # Kernel routes take a bare address for a /32, a prefix otherwise.
+        mask = group.split("/")[1]
+        if int(mask) == 32:
+            group = group.split("/")[0]
+
+        # Add kernel routes for sender
+        s_interface = topo["routers"][sender]["links"][fhr]["interface"]
+        result = addKernelRoute(tgen, sender, s_interface, group)
+        assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+        # Add kernel routes for receiver
+        r_interface = topo["routers"][receiver]["links"][lhr]["interface"]
+        result = addKernelRoute(tgen, receiver, r_interface, group)
+        assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+        # Add static routes for RPs in FHR and LHR
+        next_hop_fhr = topo["routers"][rp]["links"][fhr]["ipv4"].split("/")[0]
+        next_hop_lhr = topo["routers"][rp]["links"][lhr]["ipv4"].split("/")[0]
+        input_dict = {
+            fhr: {"static_routes": [{"network": rp_list, "next_hop": next_hop_fhr}]},
+        }
+        result = create_static_routes(tgen, input_dict)
+        assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+        input_dict = {
+            lhr: {"static_routes": [{"network": rp_list, "next_hop": next_hop_lhr}]},
+        }
+        result = create_static_routes(tgen, input_dict)
+        assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+    return True
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def test_starg_mroute_p0(request):
+ """
+ 1. Verify (*,G) mroute detail on FRR router after BSM rp installed
+
+ Topology used:
+ b1_____
+ |
+ |
+ s1-----f1-----i1-----l1----r1
+ |
+ ______|
+ b2
+
+ b1 - BSR 1
+ b2 - BSR 2
+ s1 - Source
+ f1 - FHR
+ i1 - Intermediate Router (also RP)
+ r1 - Receiver
+
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ reset_config_on_routers(tgen)
+
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ GROUP_ADDRESS = "226.1.1.1"
+
+    # Use scapy to send pre-defined packet from sender to receiver
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
+ time.sleep(1)
+
+ result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify bsr state in FHR
+ result = verify_pim_bsr(tgen, topo, "f1", bsr_ip)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Check igmp groups
+ step("Verify IGMP groups in LHR l1")
+ dut = "l1"
+ intf = "l1-r1-eth1"
+ result = verify_igmp_groups(tgen, dut, intf, GROUP_ADDRESS)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ group = "226.1.1.1/32"
+ src_addr = "*"
+
+ # Find the elected rp from bsrp-info
+ step("Find the elected rp from bsrp-info in LHR in l1")
+ rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group)
+ assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Check RP detail in LHR
+ step("Verify RP in LHR in l1")
+ result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group])
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify join state and join timer
+ step("Verify join state and join timer in l1")
+ iif = "l1-i1-eth0"
+ result = verify_join_state_and_timer(tgen, dut, iif, src_addr, GROUP_ADDRESS)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify upstream IIF interface
+ step("Verify upstream IIF interface in l1")
+ result = verify_upstream_iif(tgen, dut, iif, src_addr, GROUP_ADDRESS)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify IIF/OIL in pim state
+ oil = "l1-r1-eth1"
+ result = verify_pim_state(tgen, dut, iif, oil, GROUP_ADDRESS)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify ip mroute
+ step("Verify ip mroute in l1")
+ src_addr = "*"
+ result = verify_ip_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Remove the group rp mapping and send bsm
+ step("Remove the grp-rp mapping by sending bsm with hold time 0 for grp-rp")
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet2")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Check RP unreachable
+ step("Check RP unreachability in l1")
+ iif = "Unknown"
+ result = verify_upstream_iif(
+ tgen, dut, iif, src_addr, GROUP_ADDRESS, joinState="NotJoined"
+ )
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify that it is not installed
+ step("Verify that iif is not installed in l1")
+ iif = "<iif?>"
+ result = verify_pim_state(tgen, dut, iif, oil, GROUP_ADDRESS, installed_fl=0)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify mroute not installed
+ step("Verify mroute not installed in l1")
+ result = verify_ip_mroutes(
+ tgen, dut, src_addr, GROUP_ADDRESS, iif, oil, wait=20, expected=False
+ )
+ assert result is not True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Send BSM again to configure rp
+ step("Add back RP by sending BSM from b1")
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Verify that (*,G) installed in mroute again
+ iif = "l1-i1-eth0"
+ result = verify_ip_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ step("clear BSM database before moving to next case")
+ clear_bsrp_data(tgen, topo)
+
+ write_test_footer(tc_name)
+
+
+def test_overlapping_group_p0(request):
+ """
+ Verify group to RP updated correctly on FRR router, when BSR advertising
+ the overlapping group address
+
+ Topology used:
+ b1_____
+ |
+ |
+ s1-----f1-----i1-----l1----r1
+ |
+ ______|
+ b2
+
+ b1 - BSR 1
+ b2 - BSR 2
+ s1 - Source
+ f1 - FHR
+ i1 - Intermediate Router (also RP)
+ r1 - Receiver
+
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ reset_config_on_routers(tgen)
+
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ GROUP_ADDRESS = "225.1.1.1"
+
+ # Use scapy to send pre-defined packet from senser to receiver
+ step("Send BSR packet from b1 to FHR")
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet4")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
+ time.sleep(1)
+
+ result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify bsr state in FHR
+ step("Verify if b1 is chosen as bsr in f1")
+ result = verify_pim_bsr(tgen, topo, "f1", bsr_ip)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ dut = "l1"
+ group1 = "225.1.1.1/32"
+ # Find the elected rp from bsrp-info fro group 225.1.1.1/32
+ step("Find the elected rp from bsrp-info in LHR for 225.1.1.1/32")
+ rp1 = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group1)
+ assert rp1 is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ group2 = "225.1.1.0/24"
+ # Find the elected rp from bsrp-info fro group 225.1.1.0/24
+ step("Find the elected rp from bsrp-info in LHR for 225.1.1.0/24")
+ rp2 = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group2)
+ assert rp2 is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ iif = "l1-i1-eth0"
+ # Verify upstream rpf for 225.1.1.1 is chosen as rp1
+ step("Verify upstream rpf for 225.1.1.1 is chosen as rp1 in l1")
+ result = verify_ip_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, rp1)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Send BSR packet from b1 with rp for 225.1.1.1/32 removed
+ step("Send BSR packet from b1 with rp for 225.1.1.1/32 removed")
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet5")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Verify upstream rpf for 225.1.1.1 is chosen as rp1
+ step("Verify upstream rpf for 225.1.1.1 is chosen as rp2 in l1")
+ result = verify_ip_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, rp2)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify IIF/OIL in pim state
+ step("Verify iif is installed after rp change in l1")
+ oil = "l1-r1-eth1"
+ result = verify_pim_state(tgen, dut, iif, oil, GROUP_ADDRESS)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ step("clear BSM database before moving to next case")
+ clear_bsrp_data(tgen, topo)
+
+ write_test_footer(tc_name)
+
+
+def test_RP_priority_p0(request):
+ """
+ Verify group to RP info is updated correctly, when BSR advertising the
+ same RP with different priority
+
+ Topology used:
+ b1_____
+ |
+ |
+ s1-----f1-----i1-----l1----r1
+ |
+ ______|
+ b2
+
+ b1 - BSR 1
+ b2 - BSR 2
+ s1 - Source
+ f1 - FHR
+ i1 - Intermediate Router (also RP)
+ r1 - Receiver
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ reset_config_on_routers(tgen)
+
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ GROUP_ADDRESS = "225.1.1.1"
+
+ # Use scapy to send pre-defined packet from senser to receiver
+ step("Send BSR packet from b1 to FHR")
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
+ time.sleep(1)
+
+ result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify bsr state in FHR
+ step("Verify if b1 is chosen as bsr in f1")
+ result = verify_pim_bsr(tgen, topo, "f1", bsr_ip)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ dut = "l1"
+ group = "225.1.1.0/24"
+ # Find the elected rp from bsrp-info
+ step("Find the elected rp from bsrp-info in LHR l1")
+ rp1 = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group)
+ assert rp1 is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Check RP detail in LHR
+ step("Verify RP in LHR l1")
+ result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp1[group])
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Send BSR packet from b1 after deleting high prio rp for 225.1.1.0/24
+ step("Send BSM from b1 to FHR deleting high prio rp for 225.1.1.0/24")
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet6")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Find the elected rp from bsrp-info
+ step("Find the elected rp from bsrp-info in LHR l1")
+ rp2 = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group)
+ assert rp2 is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+ logger.info("RP old: {} RP2 new: {} ".format(rp1[group], rp2[group]))
+
+ # Verify is the rp is different now
+ assert rp1[group] != rp2[group], "Testcase {} :Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ rp_add1 = rp1[group]
+ rp_add2 = rp2[group]
+
+ # Verify if that rp is installed
+ step("Verify new RP in LHR installed")
+ result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp_add2)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Send BSR packet from b1 after putting back high prio rp for 225.1.1.0/24
+ step("Send BSM from b1 to FHR put back old high prio rp for 225.1.1.0/24")
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Find the elected rp from bsrp-info
+ step("Find the elected rp from bsrp-info in LHR")
+ rp2 = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group)
+ assert rp2 is not {}, "Testcase {} :Failed \n Error : RP not Found".format(tc_name)
+
+ # Verify is the rp is different now
+ step("Verify now old RP is elected again")
+ assert (
+ rp_add1 == rp2[group]
+ ), "Testcase {} :Failed \n Error : rp expected {} rp received {}".format(
+ tc_name, rp_add1,
+ )
+
+ # Verify if that rp is installed
+ step("Verify new RP in LHR installed")
+ result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp_add1)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ step("clear BSM database before moving to next case")
+ clear_bsrp_data(tgen, topo)
+
+ write_test_footer(tc_name)
+
+
+def test_BSR_election_p0(request):
+ """
+ Verify group to RP mapping in FRR node when 2 BSR are present in the network
+ and both are having same BSR priority
+
+ Topology used:
+ b1_____
+ |
+ |
+ s1-----f1-----i1-----l1----r1
+ |
+ ______|
+ b2
+
+ b1 - BSR 1
+ b2 - BSR 2
+ s1 - Source
+ f1 - FHR
+ i1 - Intermediate Router (also RP)
+ r1 - Receiver
+
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ reset_config_on_routers(tgen)
+
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ GROUP_ADDRESS = "225.1.1.1"
+
+ # Use scapy to send pre-defined packet from senser to receiver
+ step("Send BSR packet from b1 to FHR")
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet3")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ bsr_ip1 = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[
+ 0
+ ]
+ time.sleep(1)
+
+ result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify bsr state in FHR
+ step("Verify if b1 is chosen as bsr in f1")
+ result = verify_pim_bsr(tgen, topo, "f1", bsr_ip1)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ dut = "l1"
+ group = "225.1.1.0/24"
+ # Find the elected rp from bsrp-info
+ step("Find the elected rp from bsrp-info in LHR in l1")
+ rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip1, group)
+ assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Check RP detail in LHR
+ step("Verify RP in LHR l1")
+ result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group])
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Send BSR packet from b2 with same priority
+ step("Send BSR packet from b2 to FHR with same priority")
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b2", "f1", "packet1")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ bsr_ip2 = topo["routers"]["b2"]["bsm"]["bsr_packets"]["packet2"]["bsr"].split("/")[
+ 0
+ ]
+ time.sleep(1)
+
+ logger.info("BSR b1:" + bsr_ip1 + " BSR b2:" + bsr_ip2)
+ # Verify bsr state in FHR
+ step("Verify if b2 is not chosen as bsr in f1")
+ result = verify_pim_bsr(tgen, topo, "f1", bsr_ip2, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Verify if b1 is still chosen as bsr
+ step("Verify if b1 is still chosen as bsr in f1")
+ result = verify_pim_bsr(tgen, topo, "f1", bsr_ip1)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Verify if that rp is installed
+ step("Verify that same RP in istalled in LHR l1")
+ result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group])
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ step("clear BSM database before moving to next case")
+ clear_bsrp_data(tgen, topo)
+
+ write_test_footer(tc_name)
+
+
+def test_RP_hash_p0(request):
+ """
+ Verify RP is selected based on hash function, when BSR advertising the group
+ to RP mapping with same priority
+
+ Topology used:
+ b1_____
+ |
+ |
+ s1-----f1-----i1-----l1----r1
+ |
+ ______|
+ b2
+
+ b1 - BSR 1
+ b2 - BSR 2
+ s1 - Source
+ f1 - FHR
+ i1 - Intermediate Router (also RP)
+ r1 - Receiver
+
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ reset_config_on_routers(tgen)
+
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ GROUP_ADDRESS = "225.1.1.1"
+
+ # Use scapy to send pre-defined packet from senser to receiver
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet7")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
+ time.sleep(1)
+
+ result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ dut = "l1"
+
+ # Verify bsr state in FHR
+ step("Verify if b1 chosen as BSR in f1")
+ result = verify_pim_bsr(tgen, topo, "f1", bsr_ip)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ group = "225.1.1.0/24"
+
+ # Find the elected rp from bsrp-info
+ step("Find the elected rp from bsrp-info in LHR l1")
+ rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group)
+ assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Verify if RP with highest hash value is chosen
+ step("Verify if RP(2.2.2.2) with highest hash value is chosen in l1")
+ if rp[group] == "2.2.2.2":
+ result = True
+ else:
+ result = "rp expected: 2.2.2.2 got:" + rp[group]
+
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Check RP detail in LHR
+ step("Verify RP in LHR")
+ result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group])
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ step("clear BSM database before moving to next case")
+ clear_bsrp_data(tgen, topo)
+
+ write_test_footer(tc_name)
+
+
+def test_BSM_fragmentation_p1(request):
+ """
+ Verify fragmentation of bootstrap message
+
+ Topology used:
+ b1_____
+ |
+ |
+ s1-----f1-----i1-----l1----r1
+ |
+ ______|
+ b2
+
+ b1 - BSR 1
+ b2 - BSR 2
+ s1 - Source
+ f1 - FHR
+ i1 - Intermediate Router (also RP)
+ r1 - Receiver
+
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ reset_config_on_routers(tgen)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ GROUP_ADDRESS = "225.1.1.1"
+
+ bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
+
+ step("Send BSM and verify if all routers have same bsrp before fragment")
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1")
+ # Verify bsr state in FHR
+ step("Verify if b1 chosen as BSR in f1")
+ result = verify_pim_bsr(tgen, topo, "f1", bsr_ip)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ fhr_node = tgen.routers()["f1"]
+ inter_node = tgen.routers()["i1"]
+ lhr_node = tgen.routers()["l1"]
+
+ # Verify if bsrp list is same across f1, i1 and l1
+ step("Verify if bsrp list is same across f1, i1 and l1")
+ bsrp_f1 = fhr_node.vtysh_cmd("show ip pim bsrp-info json", isjson=True)
+ logger.info("show_ip_pim_bsrp_info_json f1: \n %s", bsrp_f1)
+ bsrp_i1 = inter_node.vtysh_cmd("show ip pim bsrp-info json", isjson=True)
+ logger.info("show_ip_pim_bsrp_info_json i1: \n %s", bsrp_i1)
+ bsrp_l1 = lhr_node.vtysh_cmd("show ip pim bsrp-info json", isjson=True)
+ logger.info("show_ip_pim_bsrp_info_json l1: \n %s", bsrp_l1)
+
+ if bsrp_f1 == bsrp_l1:
+ result = True
+ else:
+ result = "bsrp info in f1 is not same in l1"
+
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # set mtu of fhr(f1) to i1 interface to 100 so that bsm fragments
+ step("set mtu of fhr(f1) to i1 interface to 100 so that bsm fragments")
+ fhr_node.run("ifconfig f1-i1-eth2 mtu 100")
+ inter_node.run("ifconfig i1-f1-eth0 mtu 100")
+
+ # Use scapy to send pre-defined packet from senser to receiver
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet2")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ # Verify bsr state in FHR
+ step("Verify if b1 chosen as BSR")
+ result = verify_pim_bsr(tgen, topo, "f1", bsr_ip)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ # Verify if bsrp list is same across f1, i1 and l1
+ step("Verify if bsrp list is same across f1, i1 and l1 after fragmentation")
+ bsrp_f1 = fhr_node.vtysh_cmd("show ip pim bsrp-info json", isjson=True)
+ logger.info("show_ip_pim_bsrp_info_json f1: \n %s", bsrp_f1)
+ bsrp_i1 = inter_node.vtysh_cmd("show ip pim bsrp-info json", isjson=True)
+ logger.info("show_ip_pim_bsrp_info_json i1: \n %s", bsrp_i1)
+ bsrp_l1 = lhr_node.vtysh_cmd("show ip pim bsrp-info json", isjson=True)
+ logger.info("show_ip_pim_bsrp_info_json l1: \n %s", bsrp_l1)
+
+ if bsrp_f1 == bsrp_l1:
+ result = True
+ else:
+ result = "bsrp info in f1 is not same in l1"
+
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ step("clear BSM database before moving to next case")
+ clear_bsrp_data(tgen, topo)
+
+ write_test_footer(tc_name)
+
+
+def test_RP_with_all_ip_octet_p1(request):
+ """
+ Verify when candidate RP advertised with 32 mask length
+ and contain all the contacts
+
+ Topology used:
+ b1_____
+ |
+ |
+ s1-----f1-----i1-----l1----r1
+ |
+ ______|
+ b2
+
+ b1 - BSR 1
+ b2 - BSR 2
+ s1 - Source
+ f1 - FHR
+ i1 - Intermediate Router (also RP)
+ r1 - Receiver
+
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ step("pre-configure BSM packet")
+ result = pre_config_to_bsm(
+ tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ step("Send the IGMP group (225.100.100.100) from receiver connected to FRR")
+ GROUP_ADDRESS = "225.200.100.100"
+
+ # Use scapy to send pre-defined packet from senser to receiver
+ step("Configure cisco-1 as BSR1")
+ result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet8")
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet8"]["bsr"].split("/")[0]
+ time.sleep(1)
+
+ result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ dut = "l1"
+ step(
+ "Groups are shown with candidate RP with correct mask length 'show ip pim bsrp-info'"
+ )
+ step("Verify if b1 chosen as BSR in f1")
+ result = verify_pim_bsr(tgen, topo, "f1", bsr_ip)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ group = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet9"]["group"]
+ step("Find the elected rp from bsrp-info in LHR l1")
+ rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group)
+ assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ step("Verify RP in LHR")
+ result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group])
+ assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
+
+ step("clear BSM database before moving to next case")
+ clear_bsrp_data(tgen, topo)
+
+ write_test_footer(tc_name)
+
+
+# Allow running this test file directly; "-s" disables pytest output capture.
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
diff --git a/tests/topotests/multicast-pim-sm-topo1/multicast_pim_sm_topo1.json b/tests/topotests/multicast-pim-sm-topo1/multicast_pim_sm_topo1.json
new file mode 100644
index 0000000000..71454c2ab2
--- /dev/null
+++ b/tests/topotests/multicast-pim-sm-topo1/multicast_pim_sm_topo1.json
@@ -0,0 +1,140 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "link_local": "disable"},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32},
+ "routers": {
+ "l1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "i1": {"ipv4": "auto", "pim": "enable"},
+ "i6": {"ipv4": "auto", "pim": "enable"},
+ "i7": {"ipv4": "auto", "pim": "enable"},
+ "r2": {"ipv4": "auto", "pim": "enable"},
+ "c1": {"ipv4": "auto", "pim": "enable"}
+ },
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1" :{
+ "igmp":{
+ "version": "2"
+ }
+ }
+ }
+ },
+ "static_routes": [{
+ "network": ["1.0.5.17/32", "10.0.5.0/24", "10.0.7.0/24", "10.0.6.0/24", "10.0.9.0/24"],
+ "next_hop": "10.0.12.2"
+ },
+ {
+ "network": ["1.0.1.2/32", "1.0.3.5/32", "10.0.1.0/24", "1.0.2.2/32", "10.0.4.0/24", "10.0.3.0/24"],
+ "next_hop": "10.0.2.1"
+ }]
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "l1": {"ipv4": "auto", "pim": "enable"},
+ "f1": {"ipv4": "auto", "pim": "enable"},
+ "i3": {"ipv4": "auto", "pim": "enable"}
+ },
+ "static_routes": [{
+ "network": ["10.0.5.0/24", "10.0.6.0/24", "1.0.2.2/32", "10.0.1.0/24"],
+ "next_hop": "10.0.7.1"
+ },
+ {
+ "network": ["1.0.1.2/32", "10.0.8.0/24", "10.0.10.0/24", "10.0.4.0/24", "10.0.0.0/24", "10.0.11.0/24", "10.0.1.0/24", "10.0.2.0/24"],
+ "next_hop": "10.0.12.1"
+ }]
+ },
+ "f1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r2": {"ipv4": "auto", "pim": "enable"},
+ "c2": {"ipv4": "auto", "pim": "enable"},
+ "i2": {"ipv4": "auto", "pim": "enable"},
+ "i8": {"ipv4": "auto", "pim": "enable"}
+ },
+ "static_routes": [{
+ "network": ["1.0.5.17/32", "10.0.8.0/24", "10.0.9.0/24", "10.0.10.0/24", "10.0.12.0/24", "10.0.11.0/24"],
+ "next_hop": "10.0.7.2"
+ },
+ {
+ "network": ["1.0.2.2/32", "10.0.1.0/24", "10.0.0.0/24", "10.0.4.0/24", "1.0.1.2/32"],
+ "next_hop": "10.0.3.1"
+ }]
+ },
+ "c1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "c2": {"ipv4": "auto", "pim": "enable"},
+ "l1": {"ipv4": "auto", "pim": "enable"},
+ "i4": {"ipv4": "auto", "pim": "enable"}
+ },
+ "static_routes": [{
+ "network": ["1.0.5.17/32", "10.0.6.0/24", "10.0.3.0/24", "10.0.8.0/24", "10.0.9.0/24", "10.0.12.0/24", "10.0.10.0/24", "10.0.11.0/24"],
+ "next_hop": "10.0.2.2"
+ },
+ {
+ "network": ["10.0.5.0/24", "10.0.7.0/24", "1.0.3.5/32", "10.0.6.0/24", "1.0.2.2/32", "10.0.1.0/24", "10.0.4.0/24"],
+ "next_hop": "10.0.0.2"
+ }]
+ },
+ "c2": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "c1": {"ipv4": "auto", "pim": "enable"},
+ "f1": {"ipv4": "auto", "pim": "enable"},
+ "i5": {"ipv4": "auto", "pim": "enable"}
+ },
+ "static_routes": [{
+ "network": ["1.0.5.17/32", "10.0.5.0/24", "10.0.6.0/24", "10.0.7.0/24", "10.0.8.0/24", "10.0.9.0/24", "10.0.7.0/24", "10.0.10.0/24", "10.0.11.0/24"],
+ "next_hop": "10.0.3.2"
+ },
+ {
+ "network": ["1.0.1.2/32", "10.0.4.0/24", "10.0.2.0/24"],
+ "next_hop": "10.0.0.1"
+ }]
+ },
+ "i1": {
+ "links": {
+ "l1": {"ipv4": "auto"}
+ }
+ },
+ "i2": {
+ "links": {
+ "f1": {"ipv4": "auto"}
+ }
+ },
+ "i3": {
+ "links": {
+ "r2": {"ipv4": "auto"}
+ }
+ },
+ "i4": {
+ "links": {
+ "c1": {"ipv4": "auto"}
+ }
+ },
+ "i5": {
+ "links": {
+ "c2": {"ipv4": "auto"}
+ }
+ },
+ "i6": {
+ "links": {
+ "l1": {"ipv4": "auto"}
+ }
+ },
+ "i7": {
+ "links": {
+ "l1": {"ipv4": "auto"}
+ }
+ },
+ "i8": {
+ "links": {
+ "f1": {"ipv4": "auto"}
+ }
+ }
+ }
+}
diff --git a/tests/topotests/multicast-pim-sm-topo1/test_multicast_pim_sm_topo1.py b/tests/topotests/multicast-pim-sm-topo1/test_multicast_pim_sm_topo1.py
new file mode 100755
index 0000000000..ac675c5c2f
--- /dev/null
+++ b/tests/topotests/multicast-pim-sm-topo1/test_multicast_pim_sm_topo1.py
@@ -0,0 +1,1698 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2020 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test multicast pim sm:
+
+Test steps
+- Create topology (setup module)
+- Bring up topology
+
+Following tests are covered:
+1. TC_1_1: Verify Multicast data traffic with static RP, (*,g) and
+ (s,g) OIL updated correctly
+2. TC_1_2: Verify Multicast data traffic with static RP, (*,g) and
+ (s,g) OIL updated correctly
+3. TC_4: Verify removing the RP should not impact the multicast
+ data traffic
+4. TC_5: Verify (*,G) and (S,G) entry populated again after clear the
+ PIM nbr and mroute from FRR node
+5. TC_9: Verify (s,g) timeout from FHR and RP when same receiver
+   exists in LHR, FHR and RP
+6. TC_19: Verify mroute detail when same receiver joining 5
+ different sources
+7. TC_16: Verify (*,G) and (S,G) populated correctly
+ when FRR is the transit router
+8. TC_23: Verify (S,G) should not create if RP is not reachable
+9. TC_24: Verify modification of IGMP query timer should get update
+ accordingly
+10. TC_25: Verify modification of IGMP max query response timer
+ should get update accordingly
+"""
+
+import os
+import sys
+import json
+import time
+import datetime
+from time import sleep
+import pytest
+
+pytestmark = pytest.mark.pimd
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ step,
+ iperfSendIGMPJoin,
+ addKernelRoute,
+ reset_config_on_routers,
+ iperfSendTraffic,
+ kill_iperf,
+ shutdown_bringup_interface,
+ kill_router_daemons,
+ start_router,
+ start_router_daemons,
+ stop_router,
+ required_linux_kernel_version,
+ topo_daemons,
+)
+from lib.pim import (
+ create_pim_config,
+ create_igmp_config,
+ verify_igmp_groups,
+ verify_ip_mroutes,
+ verify_pim_interface_traffic,
+ verify_upstream_iif,
+ verify_pim_neighbors,
+ verify_pim_state,
+ verify_ip_pim_join,
+ clear_ip_mroute,
+ clear_ip_pim_interface_traffic,
+ verify_igmp_config,
+ clear_ip_mroute_verify
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/multicast_pim_sm_topo1.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+TOPOLOGY = """
+
+
+ i4-----c1-------------c2---i5
+ | |
+ | |
+ i1-----l1------r2-----f1---i2
+ | | | |
+ | | | |
+ i7 i6 i3 i8
+
+ Description:
+ i1, i2, i3. i4, i5, i6, i7, i8 - FRR running iperf to send IGMP
+ join and traffic
+ l1 - LHR
+ f1 - FHR
+ r2 - FRR router
+ c1 - FRR router
+ c2 - FRR router
+"""
+
+# Global variables
+GROUP_RANGE = "225.0.0.0/8"
+IGMP_JOIN = "225.1.1.1"
+GROUP_RANGE_1 = [
+ "225.1.1.1/32",
+ "225.1.1.2/32",
+ "225.1.1.3/32",
+ "225.1.1.4/32",
+ "225.1.1.5/32",
+]
+IGMP_JOIN_RANGE_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"]
+GROUP_RANGE_2 = [
+ "226.1.1.1/32",
+ "226.1.1.2/32",
+ "226.1.1.3/32",
+ "226.1.1.4/32",
+ "226.1.1.5/32",
+]
+IGMP_JOIN_RANGE_2 = ["226.1.1.1", "226.1.1.2", "226.1.1.3", "226.1.1.4", "226.1.1.5"]
+
+GROUP_RANGE_3 = [
+ "227.1.1.1/32",
+ "227.1.1.2/32",
+ "227.1.1.3/32",
+ "227.1.1.4/32",
+ "227.1.1.5/32",
+]
+IGMP_JOIN_RANGE_3 = ["227.1.1.1", "227.1.1.2", "227.1.1.3", "227.1.1.4", "227.1.1.5"]
+
+
+class CreateTopo(Topo):
+ """
+ Test BasicTopo - topology 1
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function"""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("4.19")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+ logger.info("Master Topology: \n {}".format(TOPOLOGY))
+
+ logger.info("Running setup_module to create topology")
+
+ tgen = Topogen(CreateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # get list of daemons needs to be started for this suite.
+ daemons = topo_daemons(tgen, topo)
+
+ # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen, daemons)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and Remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, iperf, iperf_intf, GROUP_RANGE, join=False, traffic=False
+):
+ """
+ API to do pre-configuration to send IGMP join and multicast
+ traffic
+
+ parameters:
+ -----------
+ * `tgen`: topogen object
+ * `topo`: input json data
+ * `tc_name`: caller test case name
+ * `iperf`: router running iperf
+ * `iperf_intf`: interface name router running iperf
+ * `GROUP_RANGE`: group range
+ * `join`: IGMP join, default False
+ * `traffic`: multicast traffic, default False
+ """
+
+ if join:
+        # Add route to kernel
+ result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ if traffic:
+        # Add route to kernel
+ result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ router_list = tgen.routers()
+ for router in router_list.keys():
+ if router == iperf:
+ continue
+
+ rnode = router_list[router]
+ rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter")
+
+ return True
+
+
+def verify_state_incremented(state_before, state_after):
+ """
+ API to compare interface traffic state incrementing
+
+ Parameters
+ ----------
+ * `state_before` : State dictionary for any particular instance
+ * `state_after` : State dictionary for any particular instance
+ """
+
+ for router, state_data in state_before.items():
+ for state, value in state_data.items():
+ if state_before[router][state] >= state_after[router][state]:
+ errormsg = (
+ "[DUT: %s]: state %s value has not"
+ " incremented, Initial value: %s, "
+ "Current value: %s [FAILED!!]"
+ % (
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT: %s]: State %s value is "
+ "incremented, Initial value: %s, Current value: %s"
+ " [PASSED!!]",
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+
+ return True
+
+
+def test_multicast_data_traffic_static_RP_send_join_then_traffic_p0(request):
+ """
+ TC_1_1: Verify Multicast data traffic with static RP, (*,g) and
+ (s,g) OIL updated correctly
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)")
+ intf_i1_l1 = topo["routers"]["i1"]["links"]["l1"]["interface"]
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i1", intf_i1_l1, GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("joinRx value before join sent")
+ intf_r2_l1 = topo["routers"]["r2"]["links"]["l1"]["interface"]
+ state_dict = {"r2": {intf_r2_l1: ["joinRx"]}}
+ state_before = verify_pim_interface_traffic(tgen, state_dict)
+    assert isinstance(state_before, dict), (
+        "Testcase {} : Failed \n state_before is not dictionary \n "
+        "Error: {}".format(tc_name, result)
+    )
+
+ result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send the IGMP join first and then start the traffic")
+
+ step("Configure RP on R2 (loopback interface) for the" " group range 225.0.0.0/8")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Send multicast traffic from FRR3 to 225.1.1.1 receiver")
+ intf_i2_f1 = topo["routers"]["i2"]["links"]["f1"]["interface"]
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i2", intf_i2_f1, GROUP_RANGE, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verify 'show ip mroute' showing correct RPF and OIF"
+ " interface for (*,G) and (S,G) entries on all the nodes"
+ )
+
+ source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+ intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"]
+ intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"]
+ intf_r2_l1 = topo["routers"]["r2"]["links"]["l1"]["interface"]
+ intf_r2_f1 = topo["routers"]["r2"]["links"]["f1"]["interface"]
+ intf_f1_i2 = topo["routers"]["f1"]["links"]["i2"]["interface"]
+ intf_f1_r2 = topo["routers"]["f1"]["links"]["r2"]["interface"]
+ input_dict = [
+ {"dut": "l1", "src_address": "*", "iif": intf_l1_r2, "oil": intf_l1_i1},
+ {"dut": "l1", "src_address": source, "iif": intf_l1_r2, "oil": intf_l1_i1},
+ {"dut": "r2", "src_address": "*", "iif": "lo", "oil": intf_r2_l1},
+ {"dut": "r2", "src_address": source, "iif": intf_r2_f1, "oil": intf_r2_l1},
+ {"dut": "f1", "src_address": source, "iif": intf_f1_i2, "oil": intf_f1_r2},
+ ]
+
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verify 'show ip pim upstream' showing correct OIL and IIF" " on all the nodes"
+ )
+ for data in input_dict:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("joinRx value after join sent")
+ state_after = verify_pim_interface_traffic(tgen, state_dict)
+    assert isinstance(state_after, dict), (
+        "Testcase {} : Failed \n state_after is not dictionary \n "
+        "Error: {}".format(tc_name, result)
+    )
+
+ step(
+ "l1 sent PIM (*,G) join to r2 verify using"
+ "'show ip pim interface traffic' on RP connected interface"
+ )
+ result = verify_state_incremented(state_before, state_after)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("l1 sent PIM (S,G) join to f1 , verify using 'show ip pim join'")
+ dut = "f1"
+ interface = intf_f1_r2
+ result = verify_ip_pim_join(tgen, topo, dut, interface, IGMP_JOIN)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_multicast_data_traffic_static_RP_send_traffic_then_join_p0(request):
+ """
+ TC_1_2: Verify Multicast data traffic with static RP, (*,g) and
+ (s,g) OIL updated correctly
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Configure RP on R2 (loopback interface) for the" " group range 225.0.0.0/8")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Start traffic first and then send the IGMP join")
+
+ step("Send multicast traffic from FRR3 to 225.1.1.1 receiver")
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)")
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("joinRx value before join sent")
+ state_dict = {"r2": {"r2-l1-eth2": ["joinRx"]}}
+ state_before = verify_pim_interface_traffic(tgen, state_dict)
+    assert isinstance(state_before, dict), (
+        "Testcase {} : Failed \n state_before is not dictionary \n "
+        "Error: {}".format(tc_name, result)
+    )
+
+ result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verify 'show ip mroute' showing correct RPF and OIF"
+ " interface for (*,G) and (S,G) entries on all the nodes"
+ )
+
+ source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+ input_dict = [
+ {"dut": "l1", "src_address": "*", "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"},
+ {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"},
+ {"dut": "r2", "src_address": "*", "iif": "lo", "oil": "r2-l1-eth2"},
+ {"dut": "r2", "src_address": source, "iif": "r2-f1-eth0", "oil": "r2-l1-eth2"},
+ {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-r2-eth3"},
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verify 'show ip pim upstream' showing correct OIL and IIF" " on all the nodes"
+ )
+ for data in input_dict:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("joinRx value after join sent")
+ state_after = verify_pim_interface_traffic(tgen, state_dict)
+    assert isinstance(state_after, dict), (
+        "Testcase {} : Failed \n state_after is not dictionary \n "
+        "Error: {}".format(tc_name, result)
+    )
+
+ step(
+ "l1 sent PIM (*,G) join to r2 verify using"
+ "'show ip pim interface traffic' on RP connected interface"
+ )
+ result = verify_state_incremented(state_before, state_after)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("l1 sent PIM (S,G) join to f1 , verify using 'show ip pim join'")
+ dut = "f1"
+ interface = "f1-r2-eth3"
+ result = verify_ip_pim_join(tgen, topo, dut, interface, IGMP_JOIN)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_clear_pim_neighbors_and_mroute_p0(request):
+ """
+ TC_5: Verify (*,G) and (S,G) entry populated again after clear the
+ PIM nbr and mroute from FRR node
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Configure static RP on c1 for group (225.1.1.1-5)")
+ input_dict = {
+ "c1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Enable IGMP on FRR1 interface and send IGMP join 225.1.1.1 "
+ "to 225.1.1.5 from different interfaces"
+ )
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE_1, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN_RANGE_1, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send multicast traffic from FRR3, wait for SPT switchover")
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE_1, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, "i2", IGMP_JOIN_RANGE_1, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Clear the mroute on l1, wait for 5 sec")
+ result = clear_ip_mroute_verify(tgen, "l1")
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "After clear ip mroute (*,g) entries are re-populated again"
+ " with same OIL and IIF, verify using 'show ip mroute' and "
+ " 'show ip pim upstream' "
+ )
+
+ source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+ input_dict = [
+ {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"}
+ ]
+
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
+ )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verify 'show ip pim upstream' showing correct OIL and IIF" " on all the nodes"
+ )
+ for data in input_dict:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN
+ )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_verify_mroute_when_same_receiver_in_FHR_LHR_and_RP_p0(request):
+ """
+    TC_9: Verify (s,g) timeout from FHR and RP when same receiver
+    exists in LHR, FHR and RP
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Configure RP on R2 (loopback interface) for the" " group range 225.0.0.0/8")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1) to R1")
+
+ input_dict = {
+ "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}},
+ "r2": {"igmp": {"interfaces": {"r2-i3-eth1": {"igmp": {"version": "2"}}}}},
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_join = {"i1": "i1-l1-eth0", "i8": "i8-f1-eth0", "i3": "i3-r2-eth0"}
+
+ for recvr, recvr_intf in input_join.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send multicast traffic from R3 to 225.1.1.1 receiver")
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("IGMP is received on FRR1 , FRR2 , FRR3, using " "'show ip igmp groups'")
+ igmp_groups = {"l1": "l1-i1-eth1", "r2": "r2-i3-eth1", "f1": "f1-i8-eth2"}
+ for dut, interface in igmp_groups.items():
+ result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("(*,G) present on all the node with correct OIL" " using 'show ip mroute'")
+
+ source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+ input_dict = [
+ {"dut": "l1", "src_address": "*", "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"},
+ {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"},
+ {"dut": "r2", "src_address": "*", "iif": "lo", "oil": "r2-i3-eth1"},
+ {"dut": "r2", "src_address": source, "iif": "r2-f1-eth0", "oil": "r2-i3-eth1"},
+ {"dut": "f1", "src_address": "*", "iif": "f1-r2-eth3", "oil": "f1-i8-eth2"},
+ {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-i8-eth2"},
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_verify_mroute_when_same_receiver_joining_5_diff_sources_p0(request):
+ """
+ TC_19: Verify mroute detail when same receiver joining 5
+ different sources
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Configure static RP for (226.1.1.1-5) and (232.1.1.1-5)" " in c1")
+
+ _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3
+ _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3
+
+ input_dict = {
+ "c1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": _GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Configure IGMP interface on FRR1 and FRR3 and send IGMP join"
+ "for group (226.1.1.1-5, 232.1.1.1-5)"
+ )
+
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i1", "i1-l1-eth0", _GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, "i1", _IGMP_JOIN_RANGE, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}}
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i8", "i8-f1-eth0", _GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, "i8", _IGMP_JOIN_RANGE, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Send multicast traffic from all the sources to all the "
+ "receivers (226.1.1.1-5, 232.1.1.1-5)"
+ )
+
+ input_traffic = {
+ "i6": "i6-l1-eth0",
+ "i7": "i7-l1-eth0",
+ "i3": "i3-r2-eth0",
+ "i4": "i4-c1-eth0",
+ "i5": "i5-c2-eth0",
+ }
+
+ for src, src_intf in input_traffic.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify (*,G) are created on FRR1 and FRR3 node " " 'show ip mroute' ")
+
+ source_i7 = topo["routers"]["i7"]["links"]["l1"]["ipv4"].split("/")[0]
+ source_i6 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0]
+ source_i5 = topo["routers"]["i5"]["links"]["c2"]["ipv4"].split("/")[0]
+ source_i3 = topo["routers"]["i3"]["links"]["r2"]["ipv4"].split("/")[0]
+ input_dict = [
+ {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"},
+ {
+ "dut": "l1",
+ "src_address": source_i5,
+ "iif": "l1-c1-eth0",
+ "oil": "l1-i1-eth1",
+ },
+ {
+ "dut": "l1",
+ "src_address": source_i3,
+ "iif": "l1-r2-eth4",
+ "oil": "l1-i1-eth1",
+ },
+ {
+ "dut": "l1",
+ "src_address": source_i6,
+ "iif": "l1-i6-eth2",
+ "oil": "l1-i1-eth1",
+ },
+ {
+ "dut": "l1",
+ "src_address": source_i7,
+ "iif": "l1-i7-eth3",
+ "oil": "l1-i1-eth1",
+ },
+ {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"},
+ {
+ "dut": "f1",
+ "src_address": source_i5,
+ "iif": "f1-c2-eth0",
+ "oil": "f1-i8-eth2",
+ },
+ {
+ "dut": "f1",
+ "src_address": source_i3,
+ "iif": "f1-r2-eth3",
+ "oil": "f1-i8-eth2",
+ },
+ {
+ "dut": "f1",
+ "src_address": source_i6,
+ "iif": "f1-r2-eth3",
+ "oil": "f1-i8-eth2",
+ },
+ {
+ "dut": "f1",
+ "src_address": source_i7,
+ "iif": "f1-r2-eth3",
+ "oil": "f1-i8-eth2",
+ },
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Stop the source one by one on FRR1")
+ input_intf = {"i6": "i6-l1-eth0", "i7": "i7-l1-eth0"}
+ for dut, intf in input_intf.items():
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step(
+ "After removing the source verify traffic is stopped"
+ " immediately and (S,G) got timeout in sometime"
+ )
+
+ logger.info("After shut, waiting for SG timeout")
+
+ input_dict = [
+ {
+ "dut": "l1",
+ "src_address": source_i6,
+ "iif": "l1-i6-eth2",
+ "oil": "l1-i1-eth1",
+ },
+ {
+ "dut": "l1",
+ "src_address": source_i7,
+ "iif": "l1-i7-eth3",
+ "oil": "l1-i1-eth1",
+ },
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+        assert result is not True, ("Testcase {} : Failed \n mroutes are"
+            " still present \n Error: {}".format(tc_name, result))
+ logger.info("Expected Behavior: {}".format(result))
+
+ step(
+ "Source which is stopped got removed , other source"
+ " after still present verify using 'show ip mroute' "
+ )
+ input_dict = [
+ {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"},
+ {
+ "dut": "l1",
+ "src_address": source_i5,
+ "iif": "l1-c1-eth0",
+ "oil": "l1-i1-eth1",
+ },
+ {
+ "dut": "l1",
+ "src_address": source_i3,
+ "iif": "l1-r2-eth4",
+ "oil": "l1-i1-eth1",
+ },
+ {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"},
+ {
+ "dut": "f1",
+ "src_address": source_i5,
+ "iif": "f1-c2-eth0",
+ "oil": "f1-i8-eth2",
+ },
+ {
+ "dut": "f1",
+ "src_address": source_i3,
+ "iif": "f1-r2-eth3",
+ "oil": "f1-i8-eth2",
+ },
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Start all the source again for all the receivers")
+ input_intf = {"i6": "i6-l1-eth0", "i7": "i7-l1-eth0"}
+ for dut, intf in input_intf.items():
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ step(
+ "After starting source all the mroute entries got populated, "
+ "no duplicate entries present in mroute verify 'show ip mroute'"
+ )
+
+ input_dict = [
+ {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"},
+ {
+ "dut": "l1",
+ "src_address": source_i5,
+ "iif": "l1-c1-eth0",
+ "oil": "l1-i1-eth1",
+ },
+ {
+ "dut": "l1",
+ "src_address": source_i3,
+ "iif": "l1-r2-eth4",
+ "oil": "l1-i1-eth1",
+ },
+ {
+ "dut": "l1",
+ "src_address": source_i6,
+ "iif": "l1-i6-eth2",
+ "oil": "l1-i1-eth1",
+ },
+ {
+ "dut": "l1",
+ "src_address": source_i7,
+ "iif": "l1-i7-eth3",
+ "oil": "l1-i1-eth1",
+ },
+ {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"},
+ {
+ "dut": "f1",
+ "src_address": source_i5,
+ "iif": "f1-c2-eth0",
+ "oil": "f1-i8-eth2",
+ },
+ {
+ "dut": "f1",
+ "src_address": source_i3,
+ "iif": "f1-r2-eth3",
+ "oil": "f1-i8-eth2",
+ },
+ {
+ "dut": "f1",
+ "src_address": source_i6,
+ "iif": "f1-r2-eth3",
+ "oil": "f1-i8-eth2",
+ },
+ {
+ "dut": "f1",
+ "src_address": source_i7,
+ "iif": "f1-r2-eth3",
+ "oil": "f1-i8-eth2",
+ },
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_verify_mroute_when_frr_is_transit_router_p2(request):
+ """
+ TC_16: Verify (*,G) and (S,G) populated correctly
+ when FRR is the transit router
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Configure static RP for (226.1.1.1-5) in c2")
+ input_dict = {
+ "c2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["c2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-5) to FRR1")
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE_1, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN_RANGE_1, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send multicast traffic from FRR3 to 225.1.1.1-5 receivers")
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE_1, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, "i2", IGMP_JOIN_RANGE_1, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ # Stop r2 router to make r2 router disabled from topology
+ input_intf = {"l1": "l1-r2-eth4", "f1": "f1-r2-eth3"}
+ for dut, intf in input_intf.items():
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step(
+ "FRR4 has (S,G) and (*,G) ,created where incoming interface"
+ " toward FRR3 and OIL toward R2, verify using 'show ip mroute'"
+ " 'show ip pim state' "
+ )
+
+ source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+ input_dict = [
+ {"dut": "c2", "src_address": "*", "iif": "lo", "oil": "c2-c1-eth0"},
+ {"dut": "c2", "src_address": source, "iif": "c2-f1-eth1", "oil": "c2-c1-eth0"},
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Stop multicast traffic from FRR3")
+ dut = "i2"
+ intf = "i2-f1-eth0"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ logger.info("Waiting for 20 sec to get traffic to be stopped..")
+ sleep(20)
+
+ step("top IGMP receiver from FRR1")
+ dut = "i1"
+ intf = "i1-l1-eth0"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ logger.info("Waiting for 20 sec to get mroutes to be flused out..")
+ sleep(20)
+
+ step(
+ "After stopping receiver (*,G) also got timeout from transit"
+ " router 'show ip mroute'"
+ )
+
+ result = verify_ip_mroutes(
+ tgen, "c1", "*", IGMP_JOIN, "c1-c2-eth1", "c1-l1-eth0", expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n mroutes are"
+ " still present \n Error: {}".format(tc_name, result)
+ logger.info("Expected Behavior: {}".format(result))
+
+ write_test_footer(tc_name)
+
+
+def test_verify_mroute_when_RP_unreachable_p1(request):
+ """
+ TC_23: Verify (S,G) should not create if RP is not reachable
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Configure RP on FRR2 (loopback interface) for " "the group range 225.0.0.0/8")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)")
+
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send multicast traffic from FRR3 to 225.1.1.1 receiver")
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Configure one IGMP interface on FRR3 node and send IGMP" " join (225.1.1.1)")
+ input_dict = {
+ "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}}
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i8", "i8-f1-eth0", GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, "i8", IGMP_JOIN, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ # Verify mroutes are present in FRR3(f1)
+ source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+ input_dict = [
+ {"dut": "f1", "src_address": "*", "iif": "f1-r2-eth3", "oil": "f1-i8-eth2"},
+ {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-i8-eth2"},
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut the RP connected interface from f1 ( r2 to f1) link")
+ dut = "f1"
+ intf = "f1-r2-eth3"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ logger.info("Waiting for 20 sec to get mroutes to be flushed out..")
+ sleep(20)
+
+ step("Clear the mroute on f1")
+ clear_ip_mroute(tgen, "f1")
+
+ step(
+ "After Shut the RP interface and clear the mroute verify all "
+ "(*,G) and (S,G) got timeout from FRR3 node , verify using "
+ " 'show ip mroute' "
+ )
+
+ result = verify_ip_mroutes(
+ tgen, "f1", "*", IGMP_JOIN, "f1-r2-eth3", "f1-i8-eth2", expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n mroutes are"
+ " still present \n Error: {}".format(tc_name, result)
+ logger.info("Expected Behavior: {}".format(result))
+
+ step("IGMP groups are present verify using 'show ip igmp group'")
+ dut = "l1"
+ interface = "l1-i1-eth1"
+ result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_modify_igmp_query_timer_p0(request):
+ """
+ TC_24:
+ Verify modification of IGMP query timer should get update
+ accordingly
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)")
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP on R2 (loopback interface) for the" " group range 225.0.0.0/8")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Send multicast traffic from FRR3 to 225.1.1.1 receiver")
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verify 'show ip mroute' showing correct RPF and OIF"
+ " interface for (*,G) and (S,G) entries on all the nodes"
+ )
+
+ source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+ input_dict_4 = [
+ {"dut": "l1", "src_address": "*", "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"},
+ {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"},
+ {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-r2-eth3"},
+ ]
+ for data in input_dict_4:
+ result = verify_ip_mroutes(
+ tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verify 'show ip pim upstream' showing correct OIL and IIF" " on all the nodes"
+ )
+ for data in input_dict_4:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Modify IGMP query interval default to other timer on FRR1" "3 times")
+ input_dict_1 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {"igmp": {"query": {"query-interval": 100}}}
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = verify_igmp_config(tgen, input_dict_1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_dict_2 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {"igmp": {"query": {"query-interval": 200}}}
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = verify_igmp_config(tgen, input_dict_2)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_dict_3 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {"igmp": {"query": {"query-interval": 300}}}
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = verify_igmp_config(tgen, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Verify that no core is observed")
+ if tgen.routers_have_failure():
+ assert False, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_modify_igmp_max_query_response_timer_p0(request):
+ """
+ TC_25:
+ Verify modification of IGMP max query response timer
+ should get update accordingly
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)")
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure IGMP query response time to 10 sec on FRR1")
+ input_dict_1 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {
+ "igmp": {
+ "version": "2",
+ "query": {"query-max-response-time": 10},
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = verify_igmp_config(tgen, input_dict_1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP on R2 (loopback interface) for the" " group range 225.0.0.0/8")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Send multicast traffic from FRR3 to 225.1.1.1 receiver")
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verify 'show ip mroute' showing correct RPF and OIF"
+ " interface for (*,G) and (S,G) entries on all the nodes"
+ )
+
+ source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+ input_dict_5 = [
+ {"dut": "l1", "src_address": "*", "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"},
+ {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"},
+ {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-r2-eth3"},
+ ]
+ for data in input_dict_5:
+ result = verify_ip_mroutes(
+ tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verify 'show ip pim upstream' showing correct OIL and IIF" " on all the nodes"
+ )
+ for data in input_dict_5:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Delete the PIM and IGMP on FRR1")
+ input_dict_1 = {"l1": {"pim": {"disable": ["l1-i1-eth1"]}}}
+ result = create_pim_config(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_dict_2 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {
+ "igmp": {
+ "version": "2",
+ "delete": True,
+ "query": {"query-max-response-time": 10, "delete": True},
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure PIM on FRR")
+ result = create_pim_config(tgen, topo["routers"])
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure max query response timer 100sec on FRR1")
+ input_dict_3 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {
+ "igmp": {
+ "version": "2",
+ "query": {"query-max-response-time": 100},
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = verify_igmp_config(tgen, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Remove and add max query response timer cli with different"
+ "timer 5 times on FRR1 Enable IGMP and IGMP version 2 on FRR1"
+ " on FRR1"
+ )
+
+ input_dict_3 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {
+ "igmp": {
+ "version": "2",
+ "query": {"query-max-response-time": 110},
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = verify_igmp_config(tgen, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_dict_3 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {
+ "igmp": {
+ "version": "2",
+ "query": {"query-max-response-time": 120},
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = verify_igmp_config(tgen, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_dict_3 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {
+ "igmp": {
+ "version": "2",
+ "query": {"query-max-response-time": 140},
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = verify_igmp_config(tgen, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_dict_3 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {
+ "igmp": {
+ "version": "2",
+ "query": {"query-max-response-time": 150},
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = verify_igmp_config(tgen, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Enable IGMP and IGMP version 2 on FRR1 on FRR1")
+
+ input_dict_4 = {
+ "l1": {"igmp": {"interfaces": {"l1-i1-eth1": {"igmp": {"version": "2"}}}}}
+ }
+ result = create_igmp_config(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Verify that no core is observed")
+ if tgen.routers_have_failure():
+ assert False, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/multicast-pim-sm-topo2/multicast_pim_sm_topo2.json b/tests/topotests/multicast-pim-sm-topo2/multicast_pim_sm_topo2.json
new file mode 100644
index 0000000000..71454c2ab2
--- /dev/null
+++ b/tests/topotests/multicast-pim-sm-topo2/multicast_pim_sm_topo2.json
@@ -0,0 +1,140 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "link_local": "disable"},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32},
+ "routers": {
+ "l1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "i1": {"ipv4": "auto", "pim": "enable"},
+ "i6": {"ipv4": "auto", "pim": "enable"},
+ "i7": {"ipv4": "auto", "pim": "enable"},
+ "r2": {"ipv4": "auto", "pim": "enable"},
+ "c1": {"ipv4": "auto", "pim": "enable"}
+ },
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1" :{
+ "igmp":{
+ "version": "2"
+ }
+ }
+ }
+ },
+ "static_routes": [{
+ "network": ["1.0.5.17/32", "10.0.5.0/24", "10.0.7.0/24", "10.0.6.0/24", "10.0.9.0/24"],
+ "next_hop": "10.0.12.2"
+ },
+ {
+ "network": ["1.0.1.2/32", "1.0.3.5/32", "10.0.1.0/24", "1.0.2.2/32", "10.0.4.0/24", "10.0.3.0/24"],
+ "next_hop": "10.0.2.1"
+ }]
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "l1": {"ipv4": "auto", "pim": "enable"},
+ "f1": {"ipv4": "auto", "pim": "enable"},
+ "i3": {"ipv4": "auto", "pim": "enable"}
+ },
+ "static_routes": [{
+ "network": ["10.0.5.0/24", "10.0.6.0/24", "1.0.2.2/32", "10.0.1.0/24"],
+ "next_hop": "10.0.7.1"
+ },
+ {
+ "network": ["1.0.1.2/32", "10.0.8.0/24", "10.0.10.0/24", "10.0.4.0/24", "10.0.0.0/24", "10.0.11.0/24", "10.0.1.0/24", "10.0.2.0/24"],
+ "next_hop": "10.0.12.1"
+ }]
+ },
+ "f1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r2": {"ipv4": "auto", "pim": "enable"},
+ "c2": {"ipv4": "auto", "pim": "enable"},
+ "i2": {"ipv4": "auto", "pim": "enable"},
+ "i8": {"ipv4": "auto", "pim": "enable"}
+ },
+ "static_routes": [{
+ "network": ["1.0.5.17/32", "10.0.8.0/24", "10.0.9.0/24", "10.0.10.0/24", "10.0.12.0/24", "10.0.11.0/24"],
+ "next_hop": "10.0.7.2"
+ },
+ {
+ "network": ["1.0.2.2/32", "10.0.1.0/24", "10.0.0.0/24", "10.0.4.0/24", "1.0.1.2/32"],
+ "next_hop": "10.0.3.1"
+ }]
+ },
+ "c1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "c2": {"ipv4": "auto", "pim": "enable"},
+ "l1": {"ipv4": "auto", "pim": "enable"},
+ "i4": {"ipv4": "auto", "pim": "enable"}
+ },
+ "static_routes": [{
+ "network": ["1.0.5.17/32", "10.0.6.0/24", "10.0.3.0/24", "10.0.8.0/24", "10.0.9.0/24", "10.0.12.0/24", "10.0.10.0/24", "10.0.11.0/24"],
+ "next_hop": "10.0.2.2"
+ },
+ {
+ "network": ["10.0.5.0/24", "10.0.7.0/24", "1.0.3.5/32", "10.0.6.0/24", "1.0.2.2/32", "10.0.1.0/24", "10.0.4.0/24"],
+ "next_hop": "10.0.0.2"
+ }]
+ },
+ "c2": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "c1": {"ipv4": "auto", "pim": "enable"},
+ "f1": {"ipv4": "auto", "pim": "enable"},
+ "i5": {"ipv4": "auto", "pim": "enable"}
+ },
+ "static_routes": [{
+ "network": ["1.0.5.17/32", "10.0.5.0/24", "10.0.6.0/24", "10.0.7.0/24", "10.0.8.0/24", "10.0.9.0/24", "10.0.7.0/24", "10.0.10.0/24", "10.0.11.0/24"],
+ "next_hop": "10.0.3.2"
+ },
+ {
+ "network": ["1.0.1.2/32", "10.0.4.0/24", "10.0.2.0/24"],
+ "next_hop": "10.0.0.1"
+ }]
+ },
+ "i1": {
+ "links": {
+ "l1": {"ipv4": "auto"}
+ }
+ },
+ "i2": {
+ "links": {
+ "f1": {"ipv4": "auto"}
+ }
+ },
+ "i3": {
+ "links": {
+ "r2": {"ipv4": "auto"}
+ }
+ },
+ "i4": {
+ "links": {
+ "c1": {"ipv4": "auto"}
+ }
+ },
+ "i5": {
+ "links": {
+ "c2": {"ipv4": "auto"}
+ }
+ },
+ "i6": {
+ "links": {
+ "l1": {"ipv4": "auto"}
+ }
+ },
+ "i7": {
+ "links": {
+ "l1": {"ipv4": "auto"}
+ }
+ },
+ "i8": {
+ "links": {
+ "f1": {"ipv4": "auto"}
+ }
+ }
+ }
+}
diff --git a/tests/topotests/multicast-pim-sm-topo2/test_multicast_pim_sm_topo2.py b/tests/topotests/multicast-pim-sm-topo2/test_multicast_pim_sm_topo2.py
new file mode 100755
index 0000000000..a9d914da57
--- /dev/null
+++ b/tests/topotests/multicast-pim-sm-topo2/test_multicast_pim_sm_topo2.py
@@ -0,0 +1,1947 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2020 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test multicast pim sm:
+
+Test steps
+- Create topology (setup module)
+- Bring up topology
+
+Following tests are covered:
+1. TC_17: Verify (*,G) and (S,G) present and multicast traffic resume,
+ after restart of PIMd daemon
+2. TC_18: Verify (*,G) and (S,G) present and multicast traffic resume after
+ FRR service stop and start
+3. TC_10: Verify SPT switchover working when RPT and SPT path is
+ different
+4. TC_15: Verify (S,G) and (*,G) mroute after shut / no shut of upstream
+ interfaces
+5. TC_7: Verify mroute detail when receiver is present
+ outside of FRR
+6. TC_8: Verify mroute when FRR is acting as FHR and LHR
+7. TC_20: Verify mroute detail when 5 different receiver joining
+ same source
+8. TC_22: Verify OIL and IIF detail updated in (S,G) mroute after shut
+ and no shut of the source interface
+"""
+
+import os
+import sys
+import json
+import time
+import datetime
+from time import sleep
+import pytest
+
+pytestmark = pytest.mark.pimd
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ step,
+ iperfSendIGMPJoin,
+ addKernelRoute,
+ reset_config_on_routers,
+ iperfSendTraffic,
+ kill_iperf,
+ shutdown_bringup_interface,
+ kill_router_daemons,
+ start_router,
+ start_router_daemons,
+ stop_router,
+ required_linux_kernel_version,
+ topo_daemons,
+)
+from lib.pim import (
+ create_pim_config,
+ create_igmp_config,
+ verify_igmp_groups,
+ verify_ip_mroutes,
+ verify_pim_interface_traffic,
+ verify_upstream_iif,
+ verify_pim_neighbors,
+ verify_pim_state,
+ verify_ip_pim_join,
+ clear_ip_mroute,
+ clear_ip_pim_interface_traffic,
+ verify_igmp_config,
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology creation.
+# The JSON file lives next to this script and describes the routers,
+# links and per-interface PIM/IGMP settings used by every testcase.
+jsonFile = "{}/multicast_pim_sm_topo2.json".format(CWD)
+try:
+    with open(jsonFile, "r") as topoJson:
+        topo = json.load(topoJson)
+except IOError:
+    # Abort collection outright if the topology description is missing.
+    assert False, "Could not read file {}".format(jsonFile)
+
+TOPOLOGY = """
+
+
+ i4-----c1-------------c2---i5
+ | |
+ | |
+ i1-----l1------r2-----f1---i2
+ | | | |
+ | | | |
+ i7 i6 i3 i8
+
+ Description:
+ i1, i2, i3. i4, i5, i6, i7, i8 - FRR running iperf to send IGMP
+ join and traffic
+ l1 - LHR
+ f1 - FHR
+ r2 - FRR router
+ c1 - FRR router
+ c2 - FRR router
+"""
+
+# Global variables
+# Prefix covering every multicast group used by this suite.
+GROUP_RANGE = "225.0.0.0/8"
+# Single group address used by most testcases.
+IGMP_JOIN = "225.1.1.1"
+# First batch of five group prefixes and the matching join addresses.
+GROUP_RANGE_1 = [
+    "225.1.1.1/32",
+    "225.1.1.2/32",
+    "225.1.1.3/32",
+    "225.1.1.4/32",
+    "225.1.1.5/32",
+]
+IGMP_JOIN_RANGE_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"]
+# Second batch (226.x) used when two RPs / group sets are needed.
+GROUP_RANGE_2 = [
+    "226.1.1.1/32",
+    "226.1.1.2/32",
+    "226.1.1.3/32",
+    "226.1.1.4/32",
+    "226.1.1.5/32",
+]
+IGMP_JOIN_RANGE_2 = ["226.1.1.1", "226.1.1.2", "226.1.1.3", "226.1.1.4", "226.1.1.5"]
+
+# Third batch (227.x) for testcases exercising a second static RP.
+GROUP_RANGE_3 = [
+    "227.1.1.1/32",
+    "227.1.1.2/32",
+    "227.1.1.3/32",
+    "227.1.1.4/32",
+    "227.1.1.5/32",
+]
+IGMP_JOIN_RANGE_3 = ["227.1.1.1", "227.1.1.2", "227.1.1.3", "227.1.1.4", "227.1.1.5"]
+
+
+class CreateTopo(Topo):
+    """
+    Test BasicTopo - topology 1
+
+    Mininet Topo subclass whose build() materializes the topology
+    described by the module-level ``topo`` JSON dictionary.
+
+    * `Topo`: Topology object
+    """
+
+    def build(self, *_args, **_opts):
+        """Build function: create switches/links from the JSON data."""
+        tgen = get_topogen(self)
+
+        # Building topology from json file
+        build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment
+
+    Checks the kernel version, builds the topology, starts the required
+    daemons on every router and applies the JSON-derived configuration.
+
+    * `mod`: module name
+    """
+
+    # Required linux kernel version for this suite to run.
+    result = required_linux_kernel_version("4.19")
+    if result is not True:
+        pytest.skip("Kernel requirements are not met")
+
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: {}".format(testsuite_run_time))
+    logger.info("=" * 40)
+    logger.info("Master Topology: \n {}".format(TOPOLOGY))
+
+    logger.info("Running setup_module to create topology")
+
+    tgen = Topogen(CreateTopo, mod.__name__)
+    # ... and here it calls Mininet initialization functions.
+
+    # Get the list of daemons that need to be started for this suite.
+    daemons = topo_daemons(tgen, topo)
+
+    # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers.
+    start_topology(tgen, daemons)
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    # Creating configuration from JSON
+    build_config_from_json(tgen, topo)
+
+    logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop toplogy and Remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, iperf, iperf_intf, GROUP_RANGE, join=False, traffic=False
+):
+ """
+ API to do pre-configuration to send IGMP join and multicast
+ traffic
+
+ parameters:
+ -----------
+ * `tgen`: topogen object
+ * `topo`: input json data
+ * `tc_name`: caller test case name
+ * `iperf`: router running iperf
+ * `iperf_intf`: interface name router running iperf
+ * `GROUP_RANGE`: group range
+ * `join`: IGMP join, default False
+ * `traffic`: multicast traffic, default False
+ """
+
+ if join:
+ # Add route to kernal
+ result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ if traffic:
+ # Add route to kernal
+ result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ router_list = tgen.routers()
+ for router in router_list.keys():
+ if router == iperf:
+ continue
+
+ rnode = router_list[router]
+ rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter")
+
+ return True
+
+
+def verify_state_incremented(state_before, state_after):
+ """
+ API to compare interface traffic state incrementing
+
+ Parameters
+ ----------
+ * `state_before` : State dictionary for any particular instance
+ * `state_after` : State dictionary for any particular instance
+ """
+
+ for router, state_data in state_before.items():
+ for state, value in state_data.items():
+ if state_before[router][state] >= state_after[router][state]:
+ errormsg = (
+ "[DUT: %s]: state %s value has not"
+ " incremented, Initial value: %s, "
+ "Current value: %s [FAILED!!]"
+ % (
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT: %s]: State %s value is "
+ "incremented, Initial value: %s, Current value: %s"
+ " [PASSED!!]",
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+
+ return True
+
+
+def test_verify_mroute_and_traffic_when_pimd_restarted_p2(request):
+ """
+ TC_17: Verify (*,G) and (S,G) present and multicast traffic resume,
+ after restart of PIMd daemon
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Configure static RP for (226.1.1.1-5) in c1")
+ step("Configure static RP for (232.1.1.1-5) in c2")
+
+ _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3
+ _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3
+
+ input_dict = {
+ "c1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_2,
+ }
+ ]
+ }
+ },
+ "c2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["c2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_3,
+ }
+ ]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Enable IGMP on FRR1 interface and send IGMP join "
+ "(226.1.1.1-5) and (232.1.1.1-5)"
+ )
+ step(
+ "Configure IGMP interface on FRR3 and send IGMP join"
+ " for group (226.1.1.1-5, 232.1.1.1-5)"
+ )
+
+ input_dict = {
+ "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}}
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_join = {"i1": "i1-l1-eth0", "i8": "i8-f1-eth0"}
+
+ for recvr, recvr_intf in input_join.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Connect one source to c2 and send multicast traffic all"
+ " the receivers (226.1.1.1-5, 232.1.1.1-5)"
+ )
+ step(
+ "Send multicast traffic from FRR3 to all the receivers "
+ "(226.1.1.1-5, 232.1.1.1-5)"
+ )
+
+ input_src = {"i2": "i2-f1-eth0", "i5": "i5-c2-eth0"}
+
+ for src, src_intf in input_src.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ # Verifying mroutes before PIMd restart, fetching uptime
+
+ source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+ input_dict = [
+ {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"},
+ {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-i8-eth2"},
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Restart Pimd process on FRR3 node")
+ kill_router_daemons(tgen, "f1", ["pimd"])
+ start_router_daemons(tgen, "f1", ["pimd"])
+
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "After restart of PIMd verify pim nbr is up , IGMP groups"
+ " received , and (*,G) (S,G) entries populated again ,"
+ " Verify using 'show ip pim neighbor' , 'show ip igmp groups'"
+ " 'show ip mroute'"
+ )
+
+ result = verify_pim_neighbors(tgen, topo)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ dut = "f1"
+ interface = "f1-i8-eth2"
+ result = verify_igmp_groups(tgen, dut, interface, _IGMP_JOIN_RANGE)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Stop the traffic and restart PIMd immediately on FRR3 node")
+ dut = "i2"
+ intf = "i2-f1-eth0"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ kill_router_daemons(tgen, "f1", ["pimd"])
+ start_router_daemons(tgen, "f1", ["pimd"])
+
+ step(
+ "After PIM process come , all the none of (S,G) mroute should"
+ " present on FRR3 'show ip mroute' "
+ )
+
+ input_dict = [
+ {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"}
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ input_dict = [
+ {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "none"}
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n mroutes are"
+ " still present \n Error: {}".format(tc_name, result)
+ logger.info("Expected Behavior: {}".format(result))
+
+ write_test_footer(tc_name)
+
+
+def test_verify_mroute_and_traffic_when_frr_restarted_p2(request):
+ """
+ TC_18: Verify (*,G) and (S,G) present and multicast traffic resume after
+ FRR service stop and start
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Configure static RP for (226.1.1.1-5) in c1")
+ step("Configure static RP for (232.1.1.1-5) in c2")
+
+ _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3
+ _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3
+
+ input_dict = {
+ "c1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_2,
+ }
+ ]
+ }
+ },
+ "c2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["c2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_3,
+ }
+ ]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Enable IGMP on FRR1 interface and send IGMP join "
+ "(226.1.1.1-5) and (232.1.1.1-5)"
+ )
+ step(
+ "Configure IGMP interface on FRR3 and send IGMP join"
+ " for group (226.1.1.1-5, 232.1.1.1-5)"
+ )
+
+ input_dict = {
+ "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}}
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_join = {"i1": "i1-l1-eth0", "i8": "i8-f1-eth0"}
+
+ for recvr, recvr_intf in input_join.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Connect one source to c2 and send multicast traffic all"
+ " the receivers (226.1.1.1-5, 232.1.1.1-5)"
+ )
+ step(
+ "Send multicast traffic from FRR3 to all the receivers "
+ "(226.1.1.1-5, 232.1.1.1-5)"
+ )
+
+ input_src = {"i2": "i2-f1-eth0", "i5": "i5-c2-eth0"}
+
+ for src, src_intf in input_src.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verifying mroutes before FRR restart, fetching uptime")
+
+ source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+ input_dict = [
+ {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"},
+ {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-i8-eth2"},
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Stop and Start the FRR services on FRR3 node")
+ stop_router(tgen, "f1")
+ start_router(tgen, "f1")
+
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "After stop and start of FRR service verify pim nbr is up "
+ "IGMP groups received , and (*,G) (S,G) entries populated again"
+ " Verify using 'show ip pim neighbor' , 'show ip igmp groups'"
+ " 'show ip mroute'"
+ )
+
+ result = verify_pim_neighbors(tgen, topo)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ dut = "f1"
+ interface = "f1-i8-eth2"
+ result = verify_igmp_groups(tgen, dut, interface, _IGMP_JOIN_RANGE)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Stop the traffic and stop and start the FRR services on" " FRR3 node")
+ shutdown_bringup_interface(tgen, "i2", "i2-f1-eth0", False)
+
+ stop_router(tgen, "f1")
+ start_router(tgen, "f1")
+
+ step(
+ "After stop and start of FRR services , all the none of (S,G)"
+ " mroute should present on FRR3 node verify using "
+ "'show ip mroute'"
+ )
+
+ input_dict = [
+ {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"}
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ input_dict = [
+ {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "none"}
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n mroutes are"
+ " still present \n Error: {}".format(tc_name, result)
+ logger.info("Expected Behavior: {}".format(result))
+
+ write_test_footer(tc_name)
+
+
+def test_verify_SPT_switchover_when_RPT_and_SPT_path_is_different_p0(request):
+ """
+ TC_10: Verify SPT switchover working when RPT and SPT path is
+ different
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Configure static RP for (226.1.1.1-5) and " "(232.1.1.1-5) in c2")
+
+ _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3
+ _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3
+
+ input_dict = {
+ "c2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["c2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": _GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Enable IGMP on FRR1 interface and send IGMP join "
+ "(226.1.1.1-5) and (232.1.1.1-5)"
+ )
+
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i1", "i1-l1-eth0", _GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, "i1", _IGMP_JOIN_RANGE, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send multicast traffic from FRR3 to '226.1.1.1-5'" ", '232.1.1.1-5' receiver")
+
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i2", "i2-f1-eth0", _GROUP_RANGE, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("registerRx and registerStopTx value before traffic sent")
+ state_dict = {"c2": {"c2-f1-eth1": ["registerRx", "registerStopTx"]}}
+ state_before = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_before, dict
+ ), "Testcase {} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, "i2", _IGMP_JOIN_RANGE, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verify in FRR3 sending initial packet to RP using"
+ " 'show ip mroute' and mroute OIL is towards RP."
+ )
+
+ result = verify_ip_mroutes(
+ tgen,
+ "f1",
+ "10.0.5.2",
+ _IGMP_JOIN_RANGE,
+ "f1-i2-eth1",
+ ["f1-c2-eth0", "f1-r2-eth3"],
+ )
+ assert result is True, "Testcase {} : " "Failed Error: {}".format(tc_name, result)
+
+ result = verify_ip_mroutes(
+ tgen, "f1", "10.0.5.2", _IGMP_JOIN_RANGE, "f1-i2-eth1", "f1-r2-eth3"
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ " After spt switchover traffic is flowing between"
+ " (LHR(FRR1)-FHR(FRR3)) and (S,G) OIL is updated toward FRR1"
+ " 'show ip mroute' and 'show ip pim upstream'"
+ )
+
+ source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+ input_dict = [
+ {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-r2-eth3"},
+ {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"},
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Stop the traffic to all the receivers")
+
+ kill_iperf(tgen, "i2", "remove_traffic")
+
+ step(
+ "Null register packet being send periodically from FRR3 to RP, "
+ "verify using show ip mroute on RP, have (S, G) entries null OIL"
+ " 'show ip mroute' and verify show ip pim interface traffic"
+ "(In RP Register msg should be received and Register stop should"
+ " be transmitted)"
+ )
+ input_dict = [
+ {"dut": "c2", "src_address": source, "iif": "c2-f1-eth1", "oil": "none"}
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"]
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("registerRx and registerStopTx value after traffic sent")
+ state_after = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_after, dict
+ ), "Testcase {} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ result = verify_state_incremented(state_before, state_after)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_verify_mroute_after_shut_noshut_of_upstream_interface_p1(request):
+ """
+ TC_15: Verify (S,G) and (*,G) mroute after shut / no shut of upstream
+ interfaces
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Configure static RP for (226.1.1.1-5) in c1")
+ step("Configure static RP for (232.1.1.1-5) in c2")
+
+ _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3
+ _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3
+
+ input_dict = {
+ "c1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_2,
+ }
+ ]
+ }
+ },
+ "c2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["c2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_3,
+ }
+ ]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Enable IGMP on FRR1 interface and send IGMP join "
+ "(226.1.1.1-5) and (232.1.1.1-5)"
+ )
+ step(
+ "Configure IGMP interface on FRR3 and send IGMP join"
+ " for group (226.1.1.1-5, 232.1.1.1-5)"
+ )
+
+ input_dict = {
+ "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}}
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_join = {"i1": "i1-l1-eth0", "i8": "i8-f1-eth0"}
+
+ for recvr, recvr_intf in input_join.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Connect one source to c2 and send multicast traffic all"
+ " the receivers (226.1.1.1-5, 232.1.1.1-5)"
+ )
+ step(
+ "Send multicast traffic from FRR3 to all the receivers "
+ "(226.1.1.1-5, 232.1.1.1-5)"
+ )
+
+ input_src = {"i2": "i2-f1-eth0", "i5": "i5-c2-eth0"}
+
+ for src, src_intf in input_src.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "FRR3 (S,G) has one OIL for local receiver one toward c2"
+ " verify 'show ip mroute' and 'show ip pim upstream'"
+ )
+
+ source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+ input_dict = [
+ {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"},
+ {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-i8-eth2"},
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_2
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut and No shut interface connected from FHR (FRR3)" " to c2")
+ dut = "f1"
+ intf = "f1-c2-eth0"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ step("Shut and No shut interface connected from LHR (FRR1)" " to c1")
+ dut = "l1"
+ intf = "l1-c1-eth0"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut and No shut FRR1 and FRR3 interface")
+ shutdown_bringup_interface(tgen, "l1", "l1-r2-eth4", False)
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ shutdown_bringup_interface(tgen, "f1", "f1-r2-eth3", False)
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ step(
+ "After shut/no shut of interface , verify traffic resume to all"
+ "the receivers (S,G) OIL update for all the receivers"
+ )
+
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Shut FRR1, FRR3 interface , clear mroute in FRR1"
+ " and No shut FRR1, FRR3 interface "
+ )
+ dut = "l1"
+ intf = "l1-r2-eth4"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ dut = "f1"
+ intf = "f1-r2-eth3"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ dut = "l1"
+ intf = "l1-r2-eth4"
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ dut = "f1"
+ intf = "f1-r2-eth3"
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ clear_ip_mroute(tgen, "l1")
+ clear_ip_mroute(tgen, "l1")
+
+ step(
+ "After no shut, verify traffic resume to all the receivers"
+ " (S,G) OIL update for all the receivers"
+ )
+
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Shut and no shut upstream interface from FRR1 to FRR2 and "
+ "cisco immediate after mroute/upstream got cleared"
+ )
+
+ dut = "l1"
+ intf_l1_r2 = "l1-r2-eth4"
+ shutdown_bringup_interface(tgen, dut, intf_l1_r2, False)
+
+ intf_l1_c1 = "l1-c1-eth0"
+ shutdown_bringup_interface(tgen, dut, intf_l1_c1, False)
+
+ done_flag = False
+ for retry in range(1, 11):
+ result = verify_upstream_iif(tgen, "l1", "Unknown", source, IGMP_JOIN_RANGE_2,
+ expected=False)
+ if result is not True:
+ done_flag = True
+ else:
+ continue
+
+ if done_flag:
+ logger.info("Expected Behavior: {}".format(result))
+ break
+
+ assert done_flag is True, (
+ "Testcase {} : Failed Error: \n "
+ "mroutes are still present, after waiting for 10 mins".format(tc_name)
+ )
+
+ step("No shut the Source interface just after the upstream is expired" " from FRR1")
+ shutdown_bringup_interface(tgen, dut, intf_l1_r2, True)
+ shutdown_bringup_interface(tgen, dut, intf_l1_c1, True)
+
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Stop the traffic to all the receivers")
+ kill_iperf(tgen)
+
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n mroutes are "
+ "still present \n Error: {}".format(tc_name, result)
+ logger.info("Expected Behavior: {}".format(result))
+
+ write_test_footer(tc_name)
+
+
+def test_verify_mroute_when_receiver_is_outside_frr_p0(request):
+    """
+    TC_7: Verify mroute detail when receiver is present
+    outside of FRR
+
+    One receiver hangs off FRR1 (l1) and another off c2, i.e. outside the
+    FRR device under test; both (*,G) and (S,G) entries are then checked
+    on l1, c2 and the FHR f1.
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Creating configuration from JSON
+    kill_iperf(tgen)
+    clear_ip_mroute(tgen)
+    reset_config_on_routers(tgen)
+    clear_ip_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Configure static RP on c1 for group range " "(226.1.1.1-5) and (232.1.1.1-5)")
+
+    _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3
+    _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3
+
+    # c1 is the single RP for both group ranges; its loopback (mask
+    # stripped) is the RP address.
+    input_dict = {
+        "c1": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split(
+                            "/"
+                        )[0],
+                        "group_addr_range": _GROUP_RANGE,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Enable IGMP on FRR1 interface and send IGMP join"
+        " (226.1.1.1-5) and (232.1.1.1-5)"
+    )
+    result = config_to_send_igmp_join_and_traffic(
+        tgen, topo, tc_name, "i1", "i1-l1-eth0", _GROUP_RANGE, join=True
+    )
+    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    result = iperfSendIGMPJoin(tgen, "i1", _IGMP_JOIN_RANGE, join_interval=1)
+    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Send multicast traffic from FRR3 to all the receivers "
+        "(226.1.1.1-5) and (232.1.1.1-5)"
+    )
+    result = config_to_send_igmp_join_and_traffic(
+        tgen, topo, tc_name, "i2", "i2-f1-eth0", _GROUP_RANGE, traffic=True
+    )
+    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    result = iperfSendTraffic(tgen, "i2", _IGMP_JOIN_RANGE, 32, 2500)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Configure one more receiver in c2 enable IGMP and send"
+        " join (226.1.1.1-5) and (232.1.1.1-5)"
+    )
+    # Second receiver (i5) joins via c2, which is outside the FRR nodes
+    # under test - the point of this TC.
+    input_dict = {
+        "c2": {"igmp": {"interfaces": {"c2-i5-eth2": {"igmp": {"version": "2"}}}}}
+    }
+    result = create_igmp_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    result = config_to_send_igmp_join_and_traffic(
+        tgen, topo, tc_name, "i5", "i5-c2-eth0", _GROUP_RANGE, join=True
+    )
+    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    result = iperfSendIGMPJoin(tgen, "i5", _IGMP_JOIN_RANGE, join_interval=1)
+    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("FRR1 has 10 (*.G) and 10 (S,G) verify using 'show ip mroute count'")
+    step(
+        "All the receiver are receiving traffic on FRR1 and (S,G) OIL is toward"
+        "receivers, verify using 'show ip mroute' 'show ip pim upstream'"
+    )
+    step(
+        "All the receiver are receiving traffic on c2 and (S,G) OIL is "
+        "toward receivers, verify using 'show ip mroute' 'show ip pim upstream'"
+    )
+
+    # Source address of the stream is i2's address on its link towards f1.
+    source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+    input_dict = [
+        {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"},
+        {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"},
+        {"dut": "c2", "src_address": "*", "iif": "c2-c1-eth0", "oil": "c2-i5-eth2"},
+        {"dut": "c2", "src_address": source, "iif": "c2-f1-eth1", "oil": "c2-i5-eth2"},
+    ]
+    for data in input_dict:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            _IGMP_JOIN_RANGE,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "FRR3 has (S,G) OIL created toward c1/c2 receiver and FRR1 receiver"
+        "'show ip pim state'"
+    )
+    # On the FHR (f1) the same (S,G) must fan out to both OILs: towards c2
+    # (external receiver) and towards r2 (FRR1 receiver path).
+    input_dict = [
+        {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-c2-eth0"},
+        {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-r2-eth3"},
+    ]
+    for data in input_dict:
+        result = verify_pim_state(
+            tgen,
+            data["dut"],
+            data["iif"],
+            data["oil"],
+            _IGMP_JOIN_RANGE,
+            data["src_address"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    write_test_footer(tc_name)
+
+
+def test_verify_mroute_when_FRR_is_FHR_and_LHR_p0(request):
+ """
+ TC_8: Verify mroute when FRR is acting as FHR and LHR
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Configure static RP for group range (226.1.1.1-5) and " "(232.1.1.1-5) on c1")
+ _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3
+ _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3
+
+ input_dict = {
+ "c1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": _GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Enable IGMP on FRR1 interface and send IGMP join (226.1.1.1-5)"
+ " and (232.1.1.1-5)"
+ )
+ step(
+ "Configure receiver on FRR3 with igmp and pim enabled and "
+ "send IGMP join (226.1.1.1-5) and (232.1.1.1-5)"
+ )
+ step(
+ "Send multicast traffic from FRR3 to all the receivers "
+ "(226.1.1.1-5) and (232.1.1.1-5)"
+ )
+
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i1", "i1-l1-eth0", _GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i2", "i2-f1-eth0", _GROUP_RANGE, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send IGMP join (226.1.1.1-5, 232.1.1.1-5) to LHR(l1)")
+ result = iperfSendIGMPJoin(tgen, "i1", _IGMP_JOIN_RANGE, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send multicast traffic from FRR3 to '226.1.1.1-5'" ", '232.1.1.1-5' receiver")
+ result = iperfSendTraffic(tgen, "i2", _IGMP_JOIN_RANGE, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Configure receiver in f1 enable IGMP and send"
+ " join (226.1.1.1-5) and (232.1.1.1-5)"
+ )
+
+ step("Configure one IGMP interface on f1 node and send IGMP" " join (225.1.1.1)")
+ input_dict = {
+ "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}}
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, "i8", "i8-f1-eth0", _GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, "i8", _IGMP_JOIN_RANGE, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+ step(
+ "l1 and f1 has 10 IGMP groups (226.1.1.1-5, 232.1.1.1-5),"
+ " verify using 'show ip igmp groups'"
+ )
+ dut = "l1"
+ interface = "l1-i1-eth1"
+ result = verify_igmp_groups(tgen, dut, interface, _IGMP_JOIN_RANGE)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ dut = "f1"
+ interface = "f1-i8-eth2"
+ result = verify_igmp_groups(tgen, dut, interface, _IGMP_JOIN_RANGE)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "l1 , f1 has 10 (*,G) and 10 (S,G) for groups "
+ "(226.1.1.1-5, 232.1.1.1-5), verify using "
+ " 'show ip mroute'"
+ )
+
+ source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+ input_dict = [
+ {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"},
+ {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-r2-eth3"},
+ {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"},
+ {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"},
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Join timer is running in FHR and LHR , verify using" " 'show ip pim state'")
+
+ for data in input_dict:
+ result = verify_pim_state(
+ tgen,
+ data["dut"],
+ data["iif"],
+ data["oil"],
+ _IGMP_JOIN_RANGE,
+ data["src_address"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ # Stop the multicast traffic
+ step("Stop the traffic to all the receivers")
+ kill_iperf(tgen)
+
+ step(
+ "After traffic stopped , verify (*,G) entries are not flushed"
+ " out from FRR1 node verify using 'show ip mroute' "
+ )
+
+ input_dict = [
+ {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"},
+ {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"},
+ ]
+
+ done_flag = False
+ for retry in range(1, 11):
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+
+ if result is True:
+ done_flag = True
+ else:
+ continue
+
+ if done_flag:
+ break
+
+ assert done_flag is True, (
+ "Testcase {} : Failed Error: \n "
+ "mroutes are still present, after waiting for 10 mins".format(tc_name)
+ )
+
+ step(
+ "After traffic stopped , verify (S,G) entries are flushed out"
+ " from FRR1 node verify using 'show ip mroute' "
+ )
+
+ input_dict = [
+ {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"},
+ {"dut": "f1", "src_address": source, "iif": "i2-f1-eth0", "oil": "f1-r2-eth3"},
+ ]
+
+ done_flag = False
+ for retry in range(1, 11):
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ expected=False
+ )
+ if result is not True:
+ done_flag = True
+ else:
+ continue
+
+ if done_flag:
+ logger.info("Expected Behavior: {}".format(result))
+ break
+
+ assert done_flag is True, (
+ "Testcase {} : Failed Error: \n "
+ "mroutes are still present, after waiting for 10 mins".format(tc_name)
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_verify_mroute_when_5_different_receiver_joining_same_sources_p0(request):
+ """
+ TC_20: Verify mroute detail when 5 different receiver joining
+ same source
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Configure static RP for (226.1.1.1-5) in c1")
+ step("Configure static RP for (232.1.1.1-5) in c2")
+
+ _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3
+ _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3
+
+ input_dict = {
+ "c1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_2,
+ }
+ ]
+ }
+ },
+ "c2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["c2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_3,
+ }
+ ]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Configure 2 IGMP interface on FRR1 and send IGMP join"
+ "for group (226.1.1.1-5, 232.1.1.1-5) from both the interface"
+ )
+ step(
+ "Configure 2 IGMP interface on FRR3 and send IGMP join for"
+ " group (226.1.1.1-5, 232.1.1.1-5) from both the interface"
+ )
+ step(
+ "Configure 1 IGMP interface on c2 and send IGMP join for"
+ "group (226.1.1.1-5, 232.1.1.1-5)"
+ )
+
+ input_dict = {
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {"igmp": {"version": "2"}},
+ "f1-i2-eth1": {"igmp": {"version": "2"}},
+ }
+ }
+ },
+ "l1": {"igmp": {"interfaces": {"l1-i6-eth2": {"igmp": {"version": "2"}}}}},
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_join = {
+ "i1": "i1-l1-eth0",
+ "i6": "i6-l1-eth0",
+ "i8": "i8-f1-eth0",
+ "i2": "i2-f1-eth0",
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+ step("Configure one source in FRR2 , one in c1")
+ step(
+ "Send multicast traffic from both the sources to all the"
+ "receivers (226.1.1.1-5, 232.1.1.1-5)"
+ )
+
+ input_src = {"i3": "i3-r2-eth0"}
+
+ for src, src_intf in input_src.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ step(
+ "After all the IGMP groups received with correct port using"
+ " 'show ip igmp groups' in FRR1, FRR3, c2"
+ )
+ dut = "l1"
+ interface = "l1-i6-eth2"
+ result = verify_igmp_groups(tgen, dut, interface, _IGMP_JOIN_RANGE)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ dut = "f1"
+ interface = "f1-i8-eth2"
+ result = verify_igmp_groups(tgen, dut, interface, _IGMP_JOIN_RANGE)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "(*,G) entries got created with upstream interface RP connected"
+ " port using 'show ip pim upstream' in FRR1, FRR3, c2"
+ )
+ step(
+ "(S,G) entries created for all the receiver after starting the"
+ " source , traffic is reaching to all the receiver , verify OIL"
+ " of (S,G) is receiver port using 'show ip mroute' in FRR1, "
+ "FRR3 c2"
+ )
+
+ source = topo["routers"]["i3"]["links"]["r2"]["ipv4"].split("/")[0]
+ input_dict_all = [
+ {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"},
+ {"dut": "f1", "src_address": source, "iif": "f1-r2-eth3", "oil": "f1-i8-eth2"},
+ ]
+ for data in input_dict_all:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_all:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut the receiver interface one by one on FRR1 node")
+ shutdown_bringup_interface(tgen, "l1", "l1-i1-eth1", False)
+ shutdown_bringup_interface(tgen, "l1", "l1-i6-eth2", False)
+
+ step(
+ "After shut the receiver port verify traffic is stopped immediately"
+ " and (S,G) got timeout immediately in FRR1, FRR3, c2"
+ )
+ input_dict = [
+ {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"}
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n mroutes are"
+ " still present \n Error: {}".format(tc_name, result)
+ logger.info("Expected Behavior: {}".format(result))
+
+ step(
+ "No traffic impact observed on other receivers verify using"
+ " 'show ip mroute' "
+ )
+ input_dict = [
+ {"dut": "f1", "src_address": source, "iif": "f1-r2-eth3", "oil": "f1-i8-eth2"}
+ ]
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("No shut the receiver interface one by one on FRR1 node")
+ shutdown_bringup_interface(tgen, "l1", "l1-i1-eth1", True)
+ shutdown_bringup_interface(tgen, "l1", "l1-i6-eth2", True)
+
+ step(
+ "After no shut of receivers all the mroute entries got populated"
+ ", no duplicate entries present in mroute"
+ )
+
+ for data in input_dict_all:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_verify_oil_iif_for_mroute_after_shut_noshut_source_interface_p1(request):
+    """
+    TC_22: Verify OIL and IIF detail updated in (S,G) mroute after shut
+    and no shut of the source interface
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Creating configuration from JSON
+    kill_iperf(tgen)
+    clear_ip_mroute(tgen)
+    reset_config_on_routers(tgen)
+    clear_ip_pim_interface_traffic(tgen, topo)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Configure static RP for (226.1.1.1-5) in c1")
+    step("Configure static RP for (232.1.1.1-5) in c2")
+
+    # NOTE(review): step text mentions 232.1.1.1-5 but GROUP_RANGE_3
+    # covers 227.1.1.1-5 -- confirm which range is intended.
+    _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3
+    _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3
+
+    # Static RP: c1 loopback serves GROUP_RANGE_2, c2 loopback GROUP_RANGE_3.
+    input_dict = {
+        "c1": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split(
+                            "/"
+                        )[0],
+                        "group_addr_range": GROUP_RANGE_2,
+                    }
+                ]
+            }
+        },
+        "c2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": topo["routers"]["c2"]["links"]["lo"]["ipv4"].split(
+                            "/"
+                        )[0],
+                        "group_addr_range": GROUP_RANGE_3,
+                    }
+                ]
+            }
+        },
+    }
+
+    result = create_pim_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Configure IGMP interface on FRR1 and FRR3 and send IGMP join"
+        " for group (226.1.1.1-5, 232.1.1.1-5)"
+    )
+
+    input_dict = {
+        "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}}
+    }
+    result = create_igmp_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    input_join = {"i1": "i1-l1-eth0", "i8": "i8-f1-eth0"}
+
+    for recvr, recvr_intf in input_join.items():
+        result = config_to_send_igmp_join_and_traffic(
+            tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True
+        )
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+        result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Configure 1 source in FRR1 , 1 in FRR3")
+    step(
+        "Send multicast traffic from both the sources to all the "
+        "receivers (226.1.1.1-5, 232.1.1.1-5)"
+    )
+
+    input_src = {"i6": "i6-l1-eth0", "i2": "i2-f1-eth0"}
+
+    for src, src_intf in input_src.items():
+        result = config_to_send_igmp_join_and_traffic(
+            tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True
+        )
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+        result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500)
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "*,G) is created and (S,G) created on FRR1 and FRR3 for both"
+        " the source verify using 'show ip mroute' and "
+        " 'show ip pim upstream' to check the upstream interface"
+        " details"
+    )
+
+    # Expected mroutes for both sources on LHR (l1) and FHR (f1).
+    source_i6 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0]
+    source_i2 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+    input_dict_all = [
+        {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"},
+        {
+            "dut": "l1",
+            "src_address": source_i2,
+            "iif": "l1-r2-eth4",
+            "oil": "l1-i1-eth1",
+        },
+        {
+            "dut": "l1",
+            "src_address": source_i6,
+            "iif": "l1-i6-eth2",
+            "oil": "l1-i1-eth1",
+        },
+        {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"},
+        {
+            "dut": "f1",
+            "src_address": source_i2,
+            "iif": "f1-i2-eth1",
+            "oil": "f1-i8-eth2",
+        },
+        {
+            "dut": "f1",
+            "src_address": source_i6,
+            "iif": "f1-r2-eth3",
+            "oil": "f1-i8-eth2",
+        },
+    ]
+    for data in input_dict_all:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            _IGMP_JOIN_RANGE,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_all:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Shut the source interface one by one on FRR1")
+    # NOTE(review): step text says FRR1 but the interface shut below is
+    # f1-i2-eth1 on router f1 (the FHR in the topology legend) -- confirm.
+    shutdown_bringup_interface(tgen, "f1", "f1-i2-eth1", False)
+
+    step(
+        "After shut of ource interface from FRR3 verify all the (S,G) "
+        "entries flushed out from FRR3 node 'show ip pim upstream' "
+        " 'show ip mroute' "
+    )
+
+    # The (S,G) for the i2 source must disappear on f1 once its source
+    # interface is down (expected=False -> verify absence).
+    result = verify_ip_mroutes(
+        tgen,
+        "f1",
+        source_i2,
+        _IGMP_JOIN_RANGE,
+        "f1-i2-eth1",
+        "f1-i8-eth2",
+        expected=False,
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n mroutes are"
+        " still present \n Error: {}".format(tc_name, result)
+    )
+    logger.info("Expected Behavior: {}".format(result))
+
+    # NOTE(review): "10.0.5.2" is hard-coded; presumably the i2 source
+    # address on the 10.0.5.0/24 link -- verify against the topology JSON.
+    result = verify_upstream_iif(
+        tgen, "f1", "Unknown", "10.0.5.2", _IGMP_JOIN_RANGE, joinState="NotJoined"
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+    # Allow running this suite directly; "-s" disables pytest output capture.
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
diff --git a/tests/topotests/multicast-pim-sm-topo3/multicast_pim_sm_topo3.json b/tests/topotests/multicast-pim-sm-topo3/multicast_pim_sm_topo3.json
new file mode 100644
index 0000000000..f582f4929d
--- /dev/null
+++ b/tests/topotests/multicast-pim-sm-topo3/multicast_pim_sm_topo3.json
@@ -0,0 +1,140 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "link_local": "disable"},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32},
+ "routers": {
+ "l1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "i1": {"ipv4": "auto", "pim": "enable"},
+ "i6": {"ipv4": "auto", "pim": "enable"},
+ "i7": {"ipv4": "auto", "pim": "enable"},
+ "r2": {"ipv4": "auto", "pim": "enable"},
+ "c1": {"ipv4": "auto", "pim": "enable"}
+ },
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1" :{
+ "igmp":{
+ "version": "2"
+ }
+ }
+ }
+ },
+ "static_routes": [{
+ "network": ["1.0.5.17/32", "10.0.5.0/24", "10.0.6.0/24", "10.0.9.0/24", "1.0.3.5/32"],
+ "next_hop": "10.0.12.2"
+ },
+ {
+ "network": ["1.0.1.2/32", "1.0.3.5/32", "10.0.1.0/24", "1.0.2.2/32", "10.0.4.0/24"],
+ "next_hop": "10.0.2.1"
+ }]
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "l1": {"ipv4": "auto", "pim": "enable"},
+ "f1": {"ipv4": "auto", "pim": "enable"},
+ "i3": {"ipv4": "auto", "pim": "enable"}
+ },
+ "static_routes": [{
+ "network": ["10.0.5.0/24", "10.0.6.0/24", "1.0.2.2/32", "10.0.1.0/24", "1.0.3.5/32"],
+ "next_hop": "10.0.7.1"
+ },
+ {
+ "network": ["1.0.1.2/32", "10.0.8.0/24", "10.0.10.0/24", "10.0.4.0/24", "10.0.11.0/24", "10.0.1.0/24"],
+ "next_hop": "10.0.12.1"
+ }]
+ },
+ "f1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r2": {"ipv4": "auto", "pim": "enable"},
+ "c2": {"ipv4": "auto", "pim": "enable"},
+ "i2": {"ipv4": "auto", "pim": "enable"},
+ "i8": {"ipv4": "auto", "pim": "enable"}
+ },
+ "static_routes": [{
+ "network": ["1.0.5.17/32", "10.0.8.0/24", "10.0.9.0/24", "10.0.10.0/24", "10.0.11.0/24", "10.0.12.0/24"],
+ "next_hop": "10.0.7.2"
+ },
+ {
+ "network": ["1.0.2.2/32", "10.0.1.0/24", "10.0.4.0/24", "1.0.1.2/32"],
+ "next_hop": "10.0.3.1"
+ }]
+ },
+ "c1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "c2": {"ipv4": "auto", "pim": "enable"},
+ "l1": {"ipv4": "auto", "pim": "enable"},
+ "i4": {"ipv4": "auto", "pim": "enable"}
+ },
+ "static_routes": [{
+ "network": ["1.0.5.17/32", "10.0.6.0/24", "10.0.8.0/24", "10.0.9.0/24", "10.0.10.0/24", "10.0.11.0/24"],
+ "next_hop": "10.0.2.2"
+ },
+ {
+ "network": ["10.0.5.0/24", "10.0.7.0/24", "1.0.3.5/32", "10.0.6.0/24", "1.0.2.2/32", "10.0.1.0/24", "10.0.4.0/24"],
+ "next_hop": "10.0.0.2"
+ }]
+ },
+ "c2": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "c1": {"ipv4": "auto", "pim": "enable"},
+ "f1": {"ipv4": "auto", "pim": "enable"},
+ "i5": {"ipv4": "auto", "pim": "enable"}
+ },
+ "static_routes": [{
+ "network": ["1.0.5.17/32", "10.0.5.0/24", "10.0.6.0/24", "10.0.7.0/24", "10.0.8.0/24", "10.0.9.0/24", "10.0.10.0/24", "10.0.11.0/24"],
+ "next_hop": "10.0.3.2"
+ },
+ {
+ "network": ["1.0.1.2/32", "10.0.4.0/24"],
+ "next_hop": "10.0.0.1"
+ }]
+ },
+ "i1": {
+ "links": {
+ "l1": {"ipv4": "auto"}
+ }
+ },
+ "i2": {
+ "links": {
+ "f1": {"ipv4": "auto"}
+ }
+ },
+ "i3": {
+ "links": {
+ "r2": {"ipv4": "auto"}
+ }
+ },
+ "i4": {
+ "links": {
+ "c1": {"ipv4": "auto"}
+ }
+ },
+ "i5": {
+ "links": {
+ "c2": {"ipv4": "auto"}
+ }
+ },
+ "i6": {
+ "links": {
+ "l1": {"ipv4": "auto"}
+ }
+ },
+ "i7": {
+ "links": {
+ "l1": {"ipv4": "auto"}
+ }
+ },
+ "i8": {
+ "links": {
+ "f1": {"ipv4": "auto"}
+ }
+ }
+ }
+}
diff --git a/tests/topotests/multicast-pim-sm-topo3/multicast_pim_sm_topo4.json b/tests/topotests/multicast-pim-sm-topo3/multicast_pim_sm_topo4.json
new file mode 100644
index 0000000000..4635dac7d2
--- /dev/null
+++ b/tests/topotests/multicast-pim-sm-topo3/multicast_pim_sm_topo4.json
@@ -0,0 +1,137 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "link_local": "disable"},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32},
+ "routers": {
+ "l1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "i1": {"ipv4": "auto", "pim": "enable"},
+ "i6": {"ipv4": "auto", "pim": "enable"},
+ "i7": {"ipv4": "auto", "pim": "enable"},
+ "r2": {"ipv4": "auto", "pim": "enable"},
+ "c1": {"ipv4": "auto", "pim": "enable"}
+ },
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1" :{
+ "igmp":{
+ "version": "2"
+ }
+ }
+ }
+ },
+ "static_routes": [{
+ "network": ["10.0.4.0/24", "10.0.3.1/24"],
+ "next_hop": "10.0.12.2"
+ },
+ {
+ "network": ["10.0.1.2/24"],
+ "next_hop": "10.0.2.1"
+ }]
+
+ },
+
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "l1": {"ipv4": "auto", "pim": "enable"},
+ "f1": {"ipv4": "auto", "pim": "enable"},
+ "i3": {"ipv4": "auto", "pim": "enable"}
+ },
+ "static_routes": [{
+ "network": ["10.0.4.0/24","10.0.3.1/24"],
+ "next_hop": "10.0.7.1"
+ },
+ {
+ "network": ["1.0.4.11/32", "10.0.2.1/24", "10.0.1.2/24"],
+ "next_hop": "10.0.12.1"
+ }]
+ },
+ "f1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r2": {"ipv4": "auto", "pim": "enable"},
+ "c2": {"ipv4": "auto", "pim": "enable"},
+ "i2": {"ipv4": "auto", "pim": "enable"},
+ "i8": {"ipv4": "auto", "pim": "enable"}
+ },
+ "static_routes": [{
+ "network": ["10.0.4.0/24","10.0.3.1/24"],
+ "next_hop": "10.0.3.1"
+ },
+ {
+ "network": ["1.0.4.11/32", "10.0.2.1/24", "10.0.1.2/24"],
+ "next_hop": "10.0.7.2"
+ }]
+ },
+ "c1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "c2": {"ipv4": "auto", "pim": "enable"},
+ "l1": {"ipv4": "auto", "pim": "enable"},
+ "i4": {"ipv4": "auto", "pim": "enable"}
+ },
+ "static_routes": [{
+ "network": ["1.0.4.11/32","10.0.4.2/24", "10.0.3.1/24"],
+ "next_hop": "10.0.2.2"
+ }]
+
+
+ },
+ "c2": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "c1": {"ipv4": "auto", "pim": "enable"},
+ "f1": {"ipv4": "auto", "pim": "enable"},
+ "i5": {"ipv4": "auto", "pim": "enable"}
+ },
+ "static_routes": [
+ {
+ "network": ["1.0.4.11/32", "10.0.2.1/24", "10.0.1.2/24"],
+ "next_hop": "10.0.3.2"
+ }]
+ },
+ "i1": {
+ "links": {
+ "l1": {"ipv4": "auto"}
+ }
+ },
+ "i2": {
+ "links": {
+ "f1": {"ipv4": "auto"}
+ }
+ },
+ "i3": {
+ "links": {
+ "r2": {"ipv4": "auto"}
+ }
+ },
+ "i4": {
+ "links": {
+ "c1": {"ipv4": "auto"}
+ }
+ },
+ "i5": {
+ "links": {
+ "c2": {"ipv4": "auto"}
+ }
+ },
+ "i6": {
+ "links": {
+ "l1": {"ipv4": "auto"}
+ }
+ },
+ "i7": {
+ "links": {
+ "l1": {"ipv4": "auto"}
+ }
+ },
+ "i8": {
+ "links": {
+ "f1": {"ipv4": "auto"}
+ }
+ }
+ }
+}
diff --git a/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo3.py b/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo3.py
new file mode 100755
index 0000000000..fdceb77fd1
--- /dev/null
+++ b/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo3.py
@@ -0,0 +1,4609 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2020 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test multicast pim sm:
+
+Test steps
+- Create topology (setup module)
+- Bring up topology
+
+Following tests are covered:
+
+1. verify oil when join prune sent scenario_1 p0
+2. verify oil when join prune sent scenario_2 p0
+3. shut noshut source interface when upstream cleared from LHR p0(
+4. shut noshut receiver interface when upstream cleared from LHR p0(
+5. verify igmp clis p0
+6. verify igmp cli generate query once p0
+7. verify remove add igmp config to receiver interface p0
+8. verify remove add igmp commands when pim configured p0
+9. verify remove add pim commands when igmp configured p0
+10. pim dr priority p0
+11. pim hello timer p0
+12. Verify mroute after removing RP sending IGMP prune p2
+13. Verify prune is sent to LHR and FHR when PIM nbr went down
+14. Verify mroute flag in LHR and FHR node
+15. Verify IGMP prune processed correctly when same join received from IGMP and PIM
+16. Verify multicast traffic flowing fine, when LHR connected to RP
+17. Verify multicast traffic is flowing fine when FHR is connected to RP
+"""
+
+import os
+import re
+import sys
+import json
+import time
+import datetime
+import pytest
+
+pytestmark = pytest.mark.pimd
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ step,
+ iperfSendIGMPJoin,
+ addKernelRoute,
+ reset_config_on_routers,
+ iperfSendTraffic,
+ kill_iperf,
+ shutdown_bringup_interface,
+ kill_router_daemons,
+ start_router,
+ start_router_daemons,
+ stop_router,
+ apply_raw_config,
+ add_interfaces_to_vlan,
+ tcpdump_capture_start,
+ tcpdump_capture_stop,
+ LOGDIR,
+ check_router_status,
+ required_linux_kernel_version,
+ topo_daemons,
+)
+from lib.pim import (
+ create_pim_config,
+ create_igmp_config,
+ verify_igmp_groups,
+ verify_ip_mroutes,
+ clear_ip_mroute_verify,
+ clear_ip_mroute,
+ clear_ip_pim_interface_traffic,
+ verify_igmp_config,
+ verify_pim_neighbors,
+ verify_pim_config,
+ verify_pim_interface,
+ verify_upstream_iif,
+ verify_multicast_traffic,
+ verify_pim_rp_info,
+ get_refCount_for_mroute,
+ verify_multicast_flag_state,
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/multicast_pim_sm_topo3.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+TOPOLOGY = """
+
+ i4-----c1-------------c2---i5
+ | |
+ | |
+ i1-----l1------r2-----f1---i2
+ | | | |
+ | | | |
+ i7 i6 i3 i8
+
+ Description:
+ i1, i2, i3. i4, i5, i6, i7, i8 - FRR running iperf to send IGMP
+ join and traffic
+ l1 - LHR
+ f1 - FHR
+ r2 - FRR router
+ c1 - FRR router
+ c2 - FRR router
+"""
+
+# Global variables
+VLAN_1 = 2501
+GROUP_RANGE = "225.0.0.0/8"
+IGMP_GROUP = "225.1.1.1/32"
+IGMP_JOIN = "225.1.1.1"
+VLAN_INTF_ADRESS_1 = "10.0.8.3/24"
+GROUP_RANGE_1 = [
+ "225.1.1.1/32",
+ "225.1.1.2/32",
+ "225.1.1.3/32",
+ "225.1.1.4/32",
+ "225.1.1.5/32",
+]
+IGMP_JOIN_RANGE_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"]
+GROUP_RANGE_2 = [
+ "226.1.1.1/32",
+ "226.1.1.2/32",
+ "226.1.1.3/32",
+ "226.1.1.4/32",
+ "226.1.1.5/32",
+]
+IGMP_JOIN_RANGE_2 = ["226.1.1.1", "226.1.1.2", "226.1.1.3", "226.1.1.4", "226.1.1.5"]
+GROUP_RANGE_3 = [
+ "227.1.1.1/32",
+ "227.1.1.2/32",
+ "227.1.1.3/32",
+ "227.1.1.4/32",
+ "227.1.1.5/32",
+]
+IGMP_JOIN_RANGE_3 = ["227.1.1.1", "227.1.1.2", "227.1.1.3", "227.1.1.4", "227.1.1.5"]
+
+SAME_VLAN_IP_1 = {"ip": "10.1.1.1", "subnet": "255.255.255.0", "cidr": "24"}
+SAME_VLAN_IP_2 = {"ip": "10.1.1.2", "subnet": "255.255.255.0", "cidr": "24"}
+SAME_VLAN_IP_3 = {"ip": "10.1.1.3", "subnet": "255.255.255.0", "cidr": "24"}
+SAME_VLAN_IP_4 = {"ip": "10.1.1.4", "subnet": "255.255.255.0", "cidr": "24"}
+TCPDUMP_FILE = "{}/{}".format(LOGDIR, "v2query.txt")
+
+
+class CreateTopo(Topo):
+    """
+    Test BasicTopo - topology 1
+
+    * `Topo`: Topology object
+
+    Builds the mininet topology from the module-level ``topo`` dict
+    (loaded from multicast_pim_sm_topo3.json at import time).
+    """
+
+    def build(self, *_args, **_opts):
+        """Build function"""
+        tgen = get_topogen(self)
+
+        # Building topology from json file
+        build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment
+
+    * `mod`: module name
+    """
+
+    # Required linux kernel version for this suite to run.
+    # (assumes 4.19+ is needed for the multicast features exercised
+    # here -- TODO confirm)
+    result = required_linux_kernel_version("4.19")
+    if result is not True:
+        pytest.skip("Kernel requirements are not met")
+
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: {}".format(testsuite_run_time))
+    logger.info("=" * 40)
+    logger.info("Master Topology: \n {}".format(TOPOLOGY))
+
+    logger.info("Running setup_module to create topology")
+
+    tgen = Topogen(CreateTopo, mod.__name__)
+    # ... and here it calls Mininet initialization functions.
+
+    # get list of daemons needs to be started for this suite.
+    daemons = topo_daemons(tgen, topo)
+
+    # Starting topology, create tmp files which are loaded to routers
+    # to start deamons and then start routers
+    start_topology(tgen, daemons)
+
+    # Don"t run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    # Creating configuration from JSON
+    build_config_from_json(tgen, topo)
+
+    logger.info("Running setup_module() done")
+
+
+def teardown_module():
+    """Teardown the pytest environment"""
+
+    # Runs once after every test in this module has finished.
+    logger.info("Running teardown_module to delete topology")
+
+    tgen = get_topogen()
+
+    # Stop toplogy and Remove tmp files
+    tgen.stop_topology()
+
+    logger.info(
+        "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+    )
+    logger.info("=" * 40)
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, iperf, iperf_intf, GROUP_RANGE, join=False, traffic=False
+):
+ """
+ API to do pre-configuration to send IGMP join and multicast
+ traffic
+
+ parameters:
+ -----------
+ * `tgen`: topogen object
+ * `topo`: input json data
+ * `tc_name`: caller test case name
+ * `iperf`: router running iperf
+ * `iperf_intf`: interface name router running iperf
+ * `GROUP_RANGE`: group range
+ * `join`: IGMP join, default False
+ * `traffic`: multicast traffic, default False
+ """
+
+ if join:
+ # Add route to kernal
+ result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ if traffic:
+ # Add route to kernal
+ result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ router_list = tgen.routers()
+ for router in router_list.keys():
+ if router == iperf:
+ continue
+
+ rnode = router_list[router]
+ rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter")
+
+ for router in topo["routers"].keys():
+ if "static_routes" in topo["routers"][router]:
+ static_routes = topo["routers"][router]["static_routes"]
+ for static_route in static_routes:
+ network = static_route["network"]
+ next_hop = static_route["next_hop"]
+ if type(network) is not list:
+ network = [network]
+ for net in network:
+ addKernelRoute(tgen, router, iperf_intf, net, next_hop)
+ return True
+
+
+def verify_mroute_repopulated(uptime_before, uptime_after):
+    """
+    API to compare uptime for mroutes
+
+    Parameters
+    ----------
+    * `uptime_before` : Uptime dictionary for any particular instance
+    * `uptime_after` : Uptime dictionary for any particular instance
+
+    Returns True when every mroute reappeared with a strictly smaller
+    uptime (i.e. it was re-created after the clear); otherwise an
+    error string describing the first failure.
+    """
+
+    for group in uptime_before.keys():
+        for source in uptime_before[group].keys():
+            # Guard against KeyError below: if the source sets differ,
+            # some mroute never came back.  NOTE(review): this compares
+            # whole key sets, so an *extra* source in uptime_after also
+            # fails here -- confirm that is intended.
+            if set(uptime_before[group]) != set(uptime_after[group]):
+                errormsg = (
+                    "mroute (%s, %s) has not come"
+                    " up after mroute clear [FAILED!!]" % (source, group)
+                )
+                return errormsg
+
+            # Uptimes are "H:M:S" strings; a repopulated mroute must
+            # show a strictly smaller uptime than before the clear.
+            d1 = datetime.datetime.strptime(uptime_before[group][source], "%H:%M:%S")
+            d2 = datetime.datetime.strptime(uptime_after[group][source], "%H:%M:%S")
+            if d2 >= d1:
+                errormsg = "mroute (%s, %s) is not " "repopulated [FAILED!!]" % (
+                    source,
+                    group,
+                )
+                return errormsg
+
+            logger.info("mroute (%s, %s) is " "repopulated [PASSED!!]", source, group)
+
+    return True
+
+
+def verify_state_incremented(state_before, state_after):
+ """
+ API to compare interface traffic state incrementing
+
+ Parameters
+ ----------
+ * `state_before` : State dictionary for any particular instance
+ * `state_after` : State dictionary for any particular instance
+ """
+
+ for router, state_data in state_before.items():
+ for state, value in state_data.items():
+ if state_before[router][state] >= state_after[router][state]:
+ errormsg = (
+ "[DUT: %s]: state %s value has not"
+ " incremented, Initial value: %s, "
+ "Current value: %s [FAILED!!]"
+ % (
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT: %s]: State %s value is "
+ "incremented, Initial value: %s, Current value: %s"
+ " [PASSED!!]",
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+
+ return True
+
+
+def find_v2_query_msg_in_tcpdump(tgen, router, message, count, cap_file):
+ """
+ Find v2 query messages in tcpdump file
+
+ Parameters
+ ----------
+ * `tgen` : Topology handler
+ * `router` : Device under test
+ * `cap_file` : tcp dump file name
+
+ """
+
+ filepath = os.path.join(LOGDIR, tgen.modname, router, cap_file)
+ with open(filepath) as f:
+ if len(re.findall("{}".format(message), f.read())) < count:
+ errormsg = "[DUT: %s]: Verify Message: %s in tcpdump" " [FAILED!!]" % (
+ router,
+ message,
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT: %s]: Found message: %s in tcpdump " " count: %s [PASSED!!]",
+ router,
+ message,
+ count,
+ )
+ return True
+
+
+def find_tos_in_tcpdump(tgen, router, message, cap_file):
+ """
+ Find v2 query messages in tcpdump file
+
+ Parameters
+ ----------
+ * `tgen` : Topology handler
+ * `router` : Device under test
+ * `cap_file` : tcp dump file name
+
+ """
+
+ filepath = os.path.join(LOGDIR, tgen.modname, router, cap_file)
+ with open(filepath) as f:
+
+ if len(re.findall(message, f.read())) < 1:
+ errormsg = "[DUT: %s]: Verify Message: %s in tcpdump" " [FAILED!!]" % (
+ router,
+ message,
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT: %s]: Found message: %s in tcpdump " "[PASSED!!]", router, message
+ )
+ return True
+
+
def test_verify_oil_when_join_prune_sent_scenario_1_p1(request):
    """
    TC_21_1:
    Verify OIL detail updated in (S,G) and (*,G) mroute when IGMP
    join/prune is sent
    """

    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)

    # Creating configuration from JSON
    kill_iperf(tgen)
    clear_ip_mroute(tgen)
    reset_config_on_routers(tgen)
    clear_ip_pim_interface_traffic(tgen, topo)
    check_router_status(tgen)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3")
    step(
        "Enable IGMP of FRR1 interface and send IGMP joins "
        " from FRR1 node for group range (226.1.1.1-5)"
    )
    step(
        "Enable IGMP of FRR3 interface and send IGMP joins "
        " from FRR3 node for group range (226.1.1.1-5)"
    )

    intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
    input_dict = {
        "f1": {"igmp": {"interfaces": {intf_f1_i8: {"igmp": {"version": "2"}}}}}
    }
    result = create_igmp_config(tgen, topo, input_dict)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    input_join = {
        "i1": topo["routers"]["i1"]["links"]["l1"]["interface"],
        "i8": topo["routers"]["i8"]["links"]["f1"]["interface"],
    }

    for recvr, recvr_intf in input_join.items():
        result = config_to_send_igmp_join_and_traffic(
            tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
        )
        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

        result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step("Configure static RP for (226.1.1.1-5) in R2")

    input_dict = {
        "r2": {
            "pim": {
                "rp": [
                    {
                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
                            "/"
                        )[0],
                        "group_addr_range": GROUP_RANGE,
                    }
                ]
            }
        }
    }

    result = create_pim_config(tgen, topo, input_dict)
    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    step(
        "Configure one source on FRR3 for all the groups and send" " multicast traffic"
    )

    input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]}

    for src, src_intf in input_src.items():
        result = config_to_send_igmp_join_and_traffic(
            tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
        )
        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

        result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    # Expected (*,G)/(S,G) entries with IIF/OIL on each node before any prune.
    source_i2 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
    input_dict_all = [
        {
            "dut": "l1",
            "src_address": "*",
            "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
            "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
        },
        {
            "dut": "l1",
            "src_address": source_i2,
            "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
            "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
        },
        {
            "dut": "r2",
            "src_address": "*",
            "iif": "lo",
            "oil": topo["routers"]["r2"]["links"]["l1"]["interface"],
        },
        {
            "dut": "r2",
            "src_address": source_i2,
            "iif": topo["routers"]["r2"]["links"]["f1"]["interface"],
            "oil": topo["routers"]["r2"]["links"]["l1"]["interface"],
        },
        {
            "dut": "f1",
            "src_address": "*",
            "iif": topo["routers"]["f1"]["links"]["r2"]["interface"],
            "oil": topo["routers"]["f1"]["links"]["i8"]["interface"],
        },
        {
            "dut": "f1",
            "src_address": source_i2,
            "iif": topo["routers"]["f1"]["links"]["i2"]["interface"],
            "oil": topo["routers"]["f1"]["links"]["r2"]["interface"],
        },
    ]

    step("Verify mroutes and iff upstream")

    for data in input_dict_all:
        result = verify_ip_mroutes(
            tgen,
            data["dut"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            data["iif"],
            data["oil"],
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    for data in input_dict_all:
        result = verify_upstream_iif(
            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    step("Send the IGMP prune from ixia to (226.1.1.1-5) receiver on " "FRR1 node")

    # NOTE(review): the "IGMP prune" is effected here by shutting the
    # receiver-facing interface on l1 rather than sending an explicit leave.
    intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"]
    shutdown_bringup_interface(tgen, "l1", intf_l1_i1, False)

    step(
        "After receiving the IGMP prune from FRR1 , verify traffic "
        "immediately stopped for this receiver 'show ip multicast'"
    )

    # Negative check: traffic towards the pruned receiver must be gone.
    input_traffic = {"l1": {"traffic_sent": [intf_l1_i1]}}
    result = verify_multicast_traffic(tgen, input_traffic, expected=False)
    assert result is not True, (
        "Testcase {} : Failed \n "
        " Traffic is not stopped yet \n Error: {}".format(tc_name, result)
    )
    logger.info("Expected Behaviour: {}".format(result))

    step(
        "IGMP groups are remove from FRR1 node 'show ip igmp groups'"
        " FRR3 IGMP still present"
    )

    dut = "l1"
    result = verify_igmp_groups(
        tgen, dut, intf_l1_i1, IGMP_JOIN_RANGE_1, expected=False
    )
    assert result is not True, (
        "Testcase {} : Failed \n "
        "IGMP groups are not deleted \n Error: {}".format(tc_name, result)
    )
    logger.info("Expected Behaviour: {}".format(result))

    dut = "f1"
    result = verify_igmp_groups(tgen, dut, intf_f1_i8, IGMP_JOIN_RANGE_1)
    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    step(
        "(*,G) and (S,G) OIL got removed immediately after receiving"
        " prune 'show ip pim state' and 'show ip mroute' on FRR1 node,"
        " no impact on FRR3 receiver"
    )

    input_dict_l1 = [
        {
            "dut": "l1",
            "src_address": "*",
            "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
            "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
        },
        {
            "dut": "l1",
            "src_address": source_i2,
            "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
            "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
        },
    ]

    step("Verify mroutes and iff upstream")

    # l1's entries must be gone (expected=False), f1's must survive.
    for data in input_dict_l1:
        result = verify_ip_mroutes(
            tgen,
            data["dut"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            data["iif"],
            data["oil"],
            expected=False,
        )
        assert result is not True, (
            "Testcase {} : Failed \n "
            "mroutes are still present \n Error: {}".format(tc_name, result)
        )
        logger.info("Expected Behaviour: {}".format(result))

    for data in input_dict_l1:
        result = verify_upstream_iif(
            tgen,
            data["dut"],
            data["iif"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            expected=False,
        )
        assert result is not True, (
            "Testcase {} : Failed \n "
            "upstream entries are still present \n Error: {}".format(tc_name, result)
        )
        logger.info("Expected Behaviour: {}".format(result))

    input_dict_f1 = [
        {
            "dut": "f1",
            "src_address": "*",
            "iif": topo["routers"]["f1"]["links"]["r2"]["interface"],
            "oil": topo["routers"]["f1"]["links"]["i8"]["interface"],
        },
        {
            "dut": "f1",
            "src_address": source_i2,
            "iif": topo["routers"]["f1"]["links"]["i2"]["interface"],
            "oil": topo["routers"]["f1"]["links"]["i8"]["interface"],
        },
    ]

    step("Verify mroutes and iff upstream")

    for data in input_dict_f1:
        result = verify_ip_mroutes(
            tgen,
            data["dut"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            data["iif"],
            data["oil"],
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    for data in input_dict_f1:
        result = verify_upstream_iif(
            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    step("Send the IGMP prune from ixia to (226.1.1.1-5) receiver on " " FRR3 node")

    # Second prune: shut the f1 receiver interface as well.
    intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
    shutdown_bringup_interface(tgen, "f1", intf_f1_i8, False)

    step(
        "After receiving the IGMP prune from FRR3s , verify traffic "
        "immediately stopped for this receiver 'show ip multicast'"
    )

    input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}}
    result = verify_multicast_traffic(tgen, input_traffic, expected=False)
    assert result is not True, (
        "Testcase {} : Failed \n "
        " Traffic is not stopped yet \n Error: {}".format(tc_name, result)
    )
    logger.info("Expected Behaviour: {}".format(result))

    step(
        "IGMP groups are remove from FRR1 node 'show ip igmp groups'"
        " FRR3 IGMP still present"
    )

    dut = "f1"
    result = verify_igmp_groups(
        tgen, dut, intf_f1_i8, IGMP_JOIN_RANGE_1, expected=False
    )
    assert result is not True, (
        "Testcase {} : Failed \n "
        "IGMP groups are not deleted \n Error: {}".format(tc_name, result)
    )
    logger.info("Expected Behaviour: {}".format(result))

    step(
        "(*,G) and (S,G) OIL got prune state (none) from all the nodes"
        "FRR1, FRR3 verify using 'show ip mroute'"
    )

    input_dict_l1 = [
        {
            "dut": "l1",
            "src_address": "*",
            "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
            "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
        },
        {
            "dut": "l1",
            "src_address": source_i2,
            "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
            "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
        },
    ]

    step("Verify mroutes and iff upstream")

    for data in input_dict_l1:
        result = verify_ip_mroutes(
            tgen,
            data["dut"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            data["iif"],
            data["oil"],
            expected=False,
        )
        assert result is not True, (
            "Testcase {} : Failed \n "
            "mroutes are still present \n Error: {}".format(tc_name, result)
        )
        logger.info("Expected Behaviour: {}".format(result))

    for data in input_dict_l1:
        result = verify_upstream_iif(
            tgen,
            data["dut"],
            data["iif"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            expected=False,
        )
        assert result is not True, (
            "Testcase {} : Failed \n "
            "upstream entries are still present \n Error: {}".format(tc_name, result)
        )
        logger.info("Expected Behaviour: {}".format(result))

    # Restore both receiver interfaces and confirm the upstream re-forms.
    shutdown_bringup_interface(tgen, "f1", intf_f1_i8, True)
    shutdown_bringup_interface(tgen, "l1", intf_l1_i1, True)

    for data in input_dict_l1:
        result = verify_upstream_iif(
            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    write_test_footer(tc_name)
+
+
def test_verify_oil_when_join_prune_sent_scenario_2_p1(request):
    """
    TC_21_2: Verify OIL detail updated in (S,G) and (*,G) mroute when IGMP
    join/prune is sent
    """

    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)

    # Creating configuration from JSON
    kill_iperf(tgen)
    clear_ip_mroute(tgen)
    reset_config_on_routers(tgen)
    clear_ip_pim_interface_traffic(tgen, topo)
    check_router_status(tgen)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    step("Removing FRR3 to simulate topo " "FHR(FRR1)---LHR(FRR2)")

    # Shut the links towards c1/c2 and f1<->r2 to reduce the topology to a
    # simple FHR---LHR pair for this scenario.
    intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["interface"]
    intf_f1_c2 = topo["routers"]["f1"]["links"]["c2"]["interface"]
    intf_f1_r2 = topo["routers"]["f1"]["links"]["r2"]["interface"]
    shutdown_bringup_interface(tgen, "l1", intf_l1_c1, False)
    shutdown_bringup_interface(tgen, "f1", intf_f1_c2, False)
    shutdown_bringup_interface(tgen, "f1", intf_f1_r2, False)

    step("Enable the PIM on all the interfaces of FRR1, FRR2")
    step(
        "Enable IGMP of FRR1 interface and send IGMP joins "
        " from FRR1 node for group range (226.1.1.1-5)"
    )
    step(
        "Enable IGMP of FRR3 interface and send IGMP joins "
        " from FRR3 node for group range (226.1.1.1-5)"
    )

    intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"]
    input_dict = {
        "r2": {"igmp": {"interfaces": {intf_r2_i3: {"igmp": {"version": "2"}}}}}
    }
    result = create_igmp_config(tgen, topo, input_dict)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    input_join = {
        "i1": topo["routers"]["i1"]["links"]["l1"]["interface"],
        "i3": topo["routers"]["i3"]["links"]["r2"]["interface"],
    }

    for recvr, recvr_intf in input_join.items():
        result = config_to_send_igmp_join_and_traffic(
            tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
        )
        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

        result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step("Configure static RP for (226.1.1.1-5) in R2")

    input_dict = {
        "r2": {
            "pim": {
                "rp": [
                    {
                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
                            "/"
                        )[0],
                        "group_addr_range": GROUP_RANGE,
                    }
                ]
            }
        }
    }

    result = create_pim_config(tgen, topo, input_dict)
    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
    # Expected (*,G) entries: l1 towards its receiver, r2 (the RP) towards
    # both its local receiver (i3) and l1.
    input_dict_all = [
        {
            "dut": "l1",
            "src_address": "*",
            "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
            "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
        },
        {
            "dut": "r2",
            "src_address": "*",
            "iif": "lo",
            "oil": topo["routers"]["r2"]["links"]["l1"]["interface"],
        },
        {
            "dut": "r2",
            "src_address": "*",
            "iif": "lo",
            "oil": topo["routers"]["r2"]["links"]["i3"]["interface"],
        },
    ]

    step("Verify mroutes and iff upstream")

    for data in input_dict_all:
        result = verify_ip_mroutes(
            tgen,
            data["dut"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            data["iif"],
            data["oil"],
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    for data in input_dict_all:
        result = verify_upstream_iif(
            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    step("Send the IGMP prune from ixia to (226.1.1.1-5) receiver on " "FRR3(r2) node")

    # NOTE(review): the "IGMP prune" is effected by shutting the receiver
    # interface on r2 rather than sending an explicit leave.
    intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"]
    shutdown_bringup_interface(tgen, "r2", intf_r2_i3, False)

    step(
        "After sending IGMP prune from FRR3(r2) node verify (*,G) OIL "
        "immediately removed for local receiver mroute should have "
        " PIM protocol , IGMP should be removed verify using "
        "'show ip mroute' no impact seen on FRR1(l1) (*,G)"
    )

    input_dict_r2 = [
        {
            "dut": "r2",
            "src_address": "*",
            "iif": "lo",
            "oil": topo["routers"]["r2"]["links"]["i3"]["interface"],
        }
    ]

    for data in input_dict_r2:
        result = verify_ip_mroutes(
            tgen,
            data["dut"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            data["iif"],
            data["oil"],
            expected=False,
        )
        assert result is not True, (
            "Testcase {} : Failed \n "
            "mroutes are still present \n Error: {}".format(tc_name, result)
        )
        logger.info("Expected Behaviour: {}".format(result))

    # Entries towards l1 must be unaffected by r2's local prune.
    input_dict_l1_r2 = [
        {
            "dut": "l1",
            "src_address": "*",
            "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
            "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
        },
        {
            "dut": "r2",
            "src_address": "*",
            "iif": "lo",
            "oil": topo["routers"]["r2"]["links"]["l1"]["interface"],
        },
    ]

    step("Verify mroutes and iff upstream")

    for data in input_dict_l1_r2:
        result = verify_ip_mroutes(
            tgen,
            data["dut"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            data["iif"],
            data["oil"],
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    step("Send the IGMP prune from ixia to (226.1.1.1-5) receiver on " "FRR1(l1) node")

    intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"]
    shutdown_bringup_interface(tgen, "l1", intf_l1_i1, False)

    step(
        "After sending IGMP prune from FRR1 node verify (*,G) OIL"
        "got removed immediately from FRR1 node"
    )

    input_dict_l1 = [
        {
            "dut": "l1",
            "src_address": "*",
            "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
            "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
        }
    ]

    for data in input_dict_l1:
        result = verify_ip_mroutes(
            tgen,
            data["dut"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            data["iif"],
            data["oil"],
            expected=False,
        )
        assert result is not True, (
            "Testcase {} : Failed \n "
            "mroutes are still present \n Error: {}".format(tc_name, result)
        )
        logger.info("Expected Behaviour: {}".format(result))

    step("After prune is sent verify upstream got removed in FRR1 node")

    for data in input_dict_l1:
        result = verify_upstream_iif(
            tgen,
            data["dut"],
            data["iif"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            expected=False,
        )
        assert result is not True, (
            "Testcase {} : Failed \n "
            "upstream entries are still present \n Error: {}".format(tc_name, result)
        )
        logger.info("Expected Behaviour: {}".format(result))

    write_test_footer(tc_name)
+
+
def test_shut_noshut_source_interface_when_upstream_cleared_from_LHR_p1(request):
    """
    TC_26: Verify shut/no shut of source interface after upstream got cleared
    from LHR
    """

    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)

    # Creating configuration from JSON
    kill_iperf(tgen)
    clear_ip_mroute(tgen)
    reset_config_on_routers(tgen)
    clear_ip_pim_interface_traffic(tgen, topo)
    check_router_status(tgen)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    step("Enable the PIM on all the interfaces of FRR1, R2 and FRR3" " routers")
    step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)")

    input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}

    for recvr, recvr_intf in input_join.items():
        result = config_to_send_igmp_join_and_traffic(
            tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
        )
        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

        result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step("Configure RP on R2 (loopback interface) for " "the group range 225.0.0.0/8")

    input_dict = {
        "r2": {
            "pim": {
                "rp": [
                    {
                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
                            "/"
                        )[0],
                        "group_addr_range": GROUP_RANGE_1,
                    }
                ]
            }
        }
    }

    result = create_pim_config(tgen, topo, input_dict)
    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    step("Send multicast traffic from FRR3 to 225.1.1.1-225.1.1.10" " receiver")

    input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]}

    for src, src_intf in input_src.items():
        result = config_to_send_igmp_join_and_traffic(
            tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
        )
        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

        result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    step(
        "'show ip mroute' showing correct RPF and OIF interface for (*,G)"
        " and (S,G) entries on all the nodes"
    )

    # Baseline (*,G)/(S,G) entries expected on every node.
    source_i2 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
    input_dict_all = [
        {
            "dut": "l1",
            "src_address": "*",
            "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
            "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
        },
        {
            "dut": "l1",
            "src_address": source_i2,
            "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
            "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
        },
        {
            "dut": "r2",
            "src_address": "*",
            "iif": "lo",
            "oil": topo["routers"]["r2"]["links"]["l1"]["interface"],
        },
        {
            "dut": "r2",
            "src_address": source_i2,
            "iif": topo["routers"]["r2"]["links"]["f1"]["interface"],
            "oil": topo["routers"]["r2"]["links"]["l1"]["interface"],
        },
        {
            "dut": "f1",
            "src_address": source_i2,
            "iif": topo["routers"]["f1"]["links"]["i2"]["interface"],
            "oil": topo["routers"]["f1"]["links"]["r2"]["interface"],
        },
    ]

    step(
        "'show ip pim upstream' and 'show ip pim upstream-rpf' showing"
        " correct OIL and IIF on all the nodes"
    )

    for data in input_dict_all:
        result = verify_ip_mroutes(
            tgen,
            data["dut"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            data["iif"],
            data["oil"],
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    for data in input_dict_all:
        result = verify_upstream_iif(
            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    step("Shut the source interface from FRR3")
    intf_f1_i2 = topo["routers"]["f1"]["links"]["i2"]["interface"]
    shutdown_bringup_interface(tgen, "f1", intf_f1_i2, False)

    step(
        "After shut of source interface verify (S,G) mroutes are cleared"
        " from all the nodes"
    )

    intf_f1_r2 = topo["routers"]["f1"]["links"]["r2"]["interface"]
    result = verify_ip_mroutes(
        tgen, "f1", source_i2, IGMP_JOIN_RANGE_1, intf_f1_i2, intf_f1_r2, expected=False
    )
    assert result is not True, (
        "Testcase {} : Failed \n mroutes are"
        " still present \n Error: {}".format(tc_name, result)
    )
    logger.info("Expected Behavior: {}".format(result))

    step(
        "After waiting for (S,G) timeout from FRR1 for same"
        " source verify that (S,G) is flushed from FRR1 node"
        " 'show ip pim upstream' 'show ip mroute' "
    )

    # Poll up to 10 times for the upstream entry on l1 to disappear;
    # verify_upstream_iif presumably retries/waits internally — TODO confirm,
    # since there is no explicit sleep between iterations.
    done_flag = False
    for retry in range(1, 11):
        result = verify_upstream_iif(
            tgen, "l1", "Unknown", source_i2, IGMP_JOIN_RANGE_1, expected=False
        )
        if result is not True:
            done_flag = True
        else:
            continue
        if done_flag:
            logger.info("Expected Behavior: {}".format(result))
            break

    assert done_flag is True, (
        "Testcase {} : Failed Error: \n "
        "mroutes are still present, after waiting for 10 mins".format(tc_name)
    )

    step("No shut the Source interface just after the upstream is expired" " from FRR1")
    shutdown_bringup_interface(tgen, "f1", intf_f1_i2, True)

    step(
        "After no shut of source interface , verify all the (S,G) is "
        " populated again on 'show ip mroute' 'show ip pim upstream' "
        " with proper OIL and IIF detail"
    )

    for data in input_dict_all:
        result = verify_ip_mroutes(
            tgen,
            data["dut"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            data["iif"],
            data["oil"],
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    for data in input_dict_all:
        result = verify_upstream_iif(
            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    step("shut and no shut the source interface immediately")
    shutdown_bringup_interface(tgen, "f1", intf_f1_i2, False)
    shutdown_bringup_interface(tgen, "f1", intf_f1_i2, True)

    step(
        "All the mroutes got updated with proper OIL after no shut of"
        "interface verify using 'show ip mroute'"
    )

    for data in input_dict_all:
        result = verify_ip_mroutes(
            tgen,
            data["dut"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            data["iif"],
            data["oil"],
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    for data in input_dict_all:
        result = verify_upstream_iif(
            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    write_test_footer(tc_name)
+
+
def test_shut_noshut_receiver_interface_when_upstream_cleared_from_LHR_p1(request):
    """
    TC_27: Verify shut/no shut of receiver interface after upstream got
    cleared from LHR
    """

    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)

    # Creating configuration from JSON
    kill_iperf(tgen)
    clear_ip_mroute(tgen)
    reset_config_on_routers(tgen)
    clear_ip_pim_interface_traffic(tgen, topo)
    check_router_status(tgen)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    step("Enable the PIM on all the interfaces of FRR1, R2 and FRR3" " routers")
    step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)")

    input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}

    for recvr, recvr_intf in input_join.items():
        result = config_to_send_igmp_join_and_traffic(
            tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
        )
        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

        result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step("Configure RP on R2 (loopback interface) for " "the group range 225.0.0.0/8")

    input_dict = {
        "r2": {
            "pim": {
                "rp": [
                    {
                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
                            "/"
                        )[0],
                        "group_addr_range": GROUP_RANGE_1,
                    }
                ]
            }
        }
    }

    result = create_pim_config(tgen, topo, input_dict)
    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    step("Send multicast traffic from FRR3 to 225.1.1.1-225.1.1.10" " receiver")

    input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]}

    for src, src_intf in input_src.items():
        result = config_to_send_igmp_join_and_traffic(
            tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
        )
        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

        result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    step(
        "'show ip mroute' showing correct RPF and OIF interface for (*,G)"
        " and (S,G) entries on all the nodes"
    )

    # Baseline (*,G)/(S,G) entries expected on every node.
    source_i2 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
    input_dict_all = [
        {
            "dut": "l1",
            "src_address": "*",
            "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
            "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
        },
        {
            "dut": "l1",
            "src_address": source_i2,
            "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
            "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
        },
        {
            "dut": "r2",
            "src_address": "*",
            "iif": "lo",
            "oil": topo["routers"]["r2"]["links"]["l1"]["interface"],
        },
        {
            "dut": "r2",
            "src_address": source_i2,
            "iif": topo["routers"]["r2"]["links"]["f1"]["interface"],
            "oil": topo["routers"]["r2"]["links"]["l1"]["interface"],
        },
        {
            "dut": "f1",
            "src_address": source_i2,
            "iif": topo["routers"]["f1"]["links"]["i2"]["interface"],
            "oil": topo["routers"]["f1"]["links"]["r2"]["interface"],
        },
    ]

    for data in input_dict_all:
        result = verify_ip_mroutes(
            tgen,
            data["dut"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            data["iif"],
            data["oil"],
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    step(
        "'show ip pim upstream' and 'show ip pim upstream-rpf' showing"
        " correct OIL and IIF on all the nodes"
    )

    for data in input_dict_all:
        result = verify_upstream_iif(
            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    # NOTE(review): the step text says "source interface" (copied from TC_26)
    # but the code shuts l1's receiver-facing interface, which matches this
    # testcase's title (receiver interface) — confirm intent before changing.
    step("Shut the source interface FRR1")
    intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"]
    intf_f1_i2 = topo["routers"]["f1"]["links"]["i2"]["interface"]
    intf_f1_r2 = topo["routers"]["f1"]["links"]["r2"]["interface"]
    shutdown_bringup_interface(tgen, "l1", intf_l1_i1, False)

    step(
        "After waiting for (S,G) timeout from FRR1 for same"
        " source verify that (S,G) is flushed from FRR1 node"
        " 'show ip pim upstream' 'show ip mroute' "
    )

    # Poll up to 10 times for the upstream entry on l1 to disappear;
    # verify_upstream_iif presumably retries/waits internally — TODO confirm,
    # since there is no explicit sleep between iterations.
    done_flag = False
    for retry in range(1, 11):
        result = verify_upstream_iif(
            tgen, "l1", "Unknown", source_i2, IGMP_JOIN_RANGE_1, expected=False
        )
        if result is not True:
            done_flag = True
        else:
            continue
        if done_flag:
            logger.info("Expected Behavior: {}".format(result))
            break

    assert done_flag is True, (
        "Testcase {} : Failed Error: \n "
        "mroutes are still present, after waiting for 10 mins".format(tc_name)
    )

    step("No shut the Source interface just after the upstream is expired" " from FRR1")
    shutdown_bringup_interface(tgen, "l1", intf_l1_i1, True)

    step(
        "After no shut of source interface , verify all the (S,G) is "
        " populated again on 'show ip mroute' 'show ip pim upstream' "
        " with proper OIL and IIF detail"
    )

    for data in input_dict_all:
        result = verify_ip_mroutes(
            tgen,
            data["dut"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            data["iif"],
            data["oil"],
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    for data in input_dict_all:
        result = verify_upstream_iif(
            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    step("shut and no shut the source interface immediately")
    shutdown_bringup_interface(tgen, "f1", intf_f1_i2, False)
    shutdown_bringup_interface(tgen, "f1", intf_f1_i2, True)

    step(
        "After no shut of receiver interface , verify all the (S,G) is "
        "populated again on 'show ip mroute' 'show ip pim upstream' "
        "with proper OIL and IIF detail"
    )

    for data in input_dict_all:
        result = verify_ip_mroutes(
            tgen,
            data["dut"],
            data["src_address"],
            IGMP_JOIN_RANGE_1,
            data["iif"],
            data["oil"],
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    for data in input_dict_all:
        result = verify_upstream_iif(
            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
        )
        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    write_test_footer(tc_name)
+
+
+def test_verify_remove_add_igmp_config_to_receiver_interface_p0(request):
+ """
+ TC_33: Verify removing and adding IGMP config from the receiver interface
+ """
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+ check_router_status(tgen)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Enable PIM on all routers")
+ step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)")
+
+ input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}
+
+ for recvr, recvr_intf in input_join.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in cisco-1(f1)")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Configure source on FRR3 and start the traffic for" " (225.1.1.1-225.1.1.10)")
+
+ input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]}
+
+ for src, src_intf in input_src.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Configure source on FRR1 and start the traffic for" " (225.1.1.1-225.1.1.10)")
+
+ input_src = {"i6": topo["routers"]["i6"]["links"]["l1"]["interface"]}
+
+ for src, src_intf in input_src.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ source_i6 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0]
+ source_i2 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+ input_dict_all = [
+ {
+ "dut": "l1",
+ "src_address": "*",
+ "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "l1",
+ "src_address": source_i6,
+ "iif": topo["routers"]["l1"]["links"]["i6"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "l1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r2",
+ "src_address": "*",
+ "iif": "lo",
+ "oil": topo["routers"]["r2"]["links"]["l1"]["interface"],
+ },
+ {
+ "dut": "f1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["f1"]["links"]["i2"]["interface"],
+ "oil": topo["routers"]["f1"]["links"]["r2"]["interface"],
+ },
+ ]
+
+ for data in input_dict_all:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_all:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Remove igmp 'no ip igmp' and 'no ip igmp version 2' from"
+ " receiver interface of FRR1"
+ )
+
+ intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"]
+ input_dict_2 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {intf_l1_i1: {"igmp": {"version": "2", "delete": True,}}}
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("IGMP join removed from FRR1 , verify using " "'show ip igmp groups json'")
+
+ dut = "l1"
+ interface = topo["routers"]["l1"]["links"]["i1"]["interface"]
+ result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \n Groups are not"
+ " present \n Error: {}".format(tc_name, result)
+ )
+ logger.info("Expected Behaviour: {}".format(result))
+
+ intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"]
+ intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"]
+ intf_f1_r2 = topo["routers"]["f1"]["links"]["r2"]["interface"]
+ intf_f1_i2 = topo["routers"]["f1"]["links"]["i2"]["interface"]
+ input_traffic = {
+ "l1": {"traffic_received": [intf_l1_r2], "traffic_sent": [intf_l1_i1]},
+ "f1": {"traffic_sent": [intf_f1_r2], "traffic_received": [intf_f1_i2]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Configure igmp 'ip igmp' and 'ip igmp version 2' from "
+ "receiver interface of FRR1"
+ )
+
+ input_dict_2 = {
+ "l1": {"igmp": {"interfaces": {intf_l1_i1: {"igmp": {"version": "2"}}}}}
+ }
+ result = create_igmp_config(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "After adding IGMP on receiver interface verify (S,G) and (*,G)"
+ " entries got populated and traffic is resumed on FRR1 and FRR3 node"
+ )
+
+ step(
+ "Verify OIL/IIF and drJoinDesired using 'show ip mroute , and traffic"
+ " using show ip pim upstream and show ip multicast'"
+ )
+
+ for data in input_dict_all:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_all:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify that no core is observed")
+ if tgen.routers_have_failure():
+ assert False, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Remove igmp 'no ip igmp' and 'no ip igmp version 2' from"
+ " receiver interface of FRR1"
+ )
+
+ input_dict_2 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {intf_l1_i1: {"igmp": {"version": "2", "delete": True,}}}
+ }
+ }
+ }
+
+ result = create_igmp_config(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("IGMP join removed from FRR1 , verify using " "'show ip igmp groups json'")
+
+ dut = "l1"
+ interface = topo["routers"]["l1"]["links"]["i1"]["interface"]
+ result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \n Groups are not"
+ " present \n Error: {}".format(tc_name, result)
+ )
+ logger.info("Expected Behaviour: {}".format(result))
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Configure igmp 'ip igmp' and 'ip igmp version 2' from "
+ "receiver interface of FRR1"
+ )
+
+ input_dict_2 = {
+ "l1": {"igmp": {"interfaces": {intf_l1_i1: {"igmp": {"version": "2"}}}}}
+ }
+ result = create_igmp_config(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "After adding IGMP on receiver interface verify (S,G) and (*,G)"
+ " entries got populated and traffic is resumed on FRR1 and FRR3 node"
+ )
+
+ step(
+ "Verify OIL/IIF and drJoinDesired using 'show ip mroute , and traffic"
+ " using show ip pim upstream and show ip multicast'"
+ )
+
+ input_dict_l1_f1 = [
+ {
+ "dut": "l1",
+ "src_address": "*",
+ "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "l1",
+ "src_address": source_i6,
+ "iif": topo["routers"]["l1"]["links"]["i6"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "l1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "f1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["f1"]["links"]["i2"]["interface"],
+ "oil": topo["routers"]["f1"]["links"]["r2"]["interface"],
+ },
+ ]
+
+ for data in input_dict_l1_f1:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_l1_f1:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify that no core is observed")
+ if tgen.routers_have_failure():
+ assert False, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Remove ip igmp and send igmp prune from FRR1 interface")
+
+ input_dict_2 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {intf_l1_i1: {"igmp": {"version": "2", "delete": True,}}}
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+ step(
+ "Verification: After removing igmp 'no ip igmp' and "
+ " sending prune verify mroute and upstream got removed"
+ " from FRR1 verify using 'show ip mroute' and "
+ "'show ip pim upstream'"
+ )
+
+ dut = "l1"
+ iif = topo["routers"]["l1"]["links"]["i6"]["interface"]
+ oil = topo["routers"]["l1"]["links"]["i1"]["interface"]
+ source = source_i6
+ result = verify_ip_mroutes(
+ tgen, dut, source, IGMP_JOIN_RANGE_1, iif, oil, expected=False
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n routes are still"
+ " present \n Error: {}".format(tc_name, result)
+ )
+ logger.info("Expected Behaviour: {}".format(result))
+
+ write_test_footer(tc_name)
+
+
+def test_verify_remove_add_igmp_commands_when_pim_configured_p0(request):
+ """
+ TC_34: Verify removing and adding IGMP commands when PIM is already
+ configured
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+ check_router_status(tgen)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Enable PIM on all routers")
+ step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)")
+
+ input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}
+
+ for recvr, recvr_intf in input_join.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in cisco-1(f1)")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Configure source on FRR3 and start the traffic for" " (225.1.1.1-225.1.1.10)")
+
+ input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]}
+
+ for src, src_intf in input_src.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Configure source on FRR1 and start the traffic for" " (225.1.1.1-225.1.1.10)")
+
+ input_src = {"i6": topo["routers"]["i6"]["links"]["l1"]["interface"]}
+
+ for src, src_intf in input_src.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ source_i6 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0]
+ source_i2 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+ input_dict_all = [
+ {
+ "dut": "l1",
+ "src_address": "*",
+ "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "l1",
+ "src_address": source_i6,
+ "iif": topo["routers"]["l1"]["links"]["i6"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "l1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r2",
+ "src_address": "*",
+ "iif": "lo",
+ "oil": topo["routers"]["r2"]["links"]["l1"]["interface"],
+ },
+ {
+ "dut": "f1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["f1"]["links"]["i2"]["interface"],
+ "oil": topo["routers"]["f1"]["links"]["r2"]["interface"],
+ },
+ ]
+
+ for data in input_dict_all:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_all:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verification: After configuring IGMP related config , "
+ "verify config is present in the interface "
+ "'show ip igmp interface ensxx json'"
+ )
+
+ intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"]
+ input_dict_1 = {
+ "l1": {"igmp": {"interfaces": {intf_l1_i1: {"igmp": {"version": "2"}}}}}
+ }
+
+ result = verify_igmp_config(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Remove igmp 'no ip igmp' and 'no ip igmp version 2' from"
+ " receiver interface of FRR1"
+ )
+
+ input_dict_2 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {intf_l1_i1: {"igmp": {"version": "2", "delete": True,}}}
+ }
+ }
+ }
+
+ result = create_igmp_config(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verification: After removing the config CLI got removed "
+ "'show ip igmp interface ensxx json'"
+ )
+
+ result = verify_igmp_config(tgen, input_dict_1, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "IGMP interface is not removed \n Error: {}".format(tc_name, result)
+ )
+ logger.info("Expected Behaviour: {}".format(result))
+
+ step("Verify that no core is observed")
+ if tgen.routers_have_failure():
+ assert False, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure 'ip igmp last-member-query-count 10' on FRR1" " receiver interface")
+
+ input_dict_3 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {"igmp": {"query": {"last-member-query-count": 5}}}
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = verify_igmp_config(tgen, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Remove 'ip igmp last-member-query-count 10' on FRR1" " receiver interface")
+
+ input_dict_3 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {
+ "igmp": {
+ "query": {"last-member-query-count": "", "delete": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_dict_3 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {"igmp": {"query": {"last-member-query-count": 2}}}
+ }
+ }
+ }
+ }
+ result = verify_igmp_config(tgen, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Verify that no core is observed")
+ if tgen.routers_have_failure():
+ assert False, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Configure 'ip igmp last-member-query-interval 20' on FRR1"
+ " receiver interface"
+ )
+
+ input_dict_3 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {
+ "igmp": {"query": {"last-member-query-interval": 20}}
+ }
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = verify_igmp_config(tgen, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Remove 'ip igmp last-member-query-count 10' on FRR1" " receiver interface")
+
+ input_dict_3 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {
+ "igmp": {
+ "query": {"last-member-query-interval": "", "delete": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_dict_3 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {
+ "igmp": {"query": {"last-member-query-interval": 10}}
+ }
+ }
+ }
+ }
+ }
+ result = verify_igmp_config(tgen, input_dict_3)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Verify that no core is observed")
+ if tgen.routers_have_failure():
+ assert False, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_verify_remove_add_pim_commands_when_igmp_configured_p1(request):
+ """
+ TC_35: Verify removing and adding PIM commands when IGMP is already
+ configured
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+ check_router_status(tgen)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Configure 'ip pim' on receiver interface on FRR1")
+ step("Enable PIM on all routers")
+ step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)")
+
+ input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}
+
+ for recvr, recvr_intf in input_join.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in cisco-1(f1)")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Remove 'no ip pim' on receiver interface on FRR1")
+
+ intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"]
+ input_dict_1 = {"l1": {"pim": {"disable": intf_l1_i1}}}
+ result = create_pim_config(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Verify that no core is observed")
+ if tgen.routers_have_failure():
+ assert False, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure 'ip pim bsm' on receiver interface on FRR1")
+
+ raw_config = {
+ "l1": {"raw_config": ["interface {}".format(intf_l1_i1), "ip pim bsm"]}
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Remove 'no ip pim bsm' on receiver interface on FRR1")
+
+ raw_config = {
+ "l1": {"raw_config": ["interface {}".format(intf_l1_i1), "no ip pim bsm"]}
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify that no core is observed")
+ if tgen.routers_have_failure():
+ assert False, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure 'ip pim drpriority' on receiver interface on FRR1")
+
+ raw_config = {
+ "l1": {
+ "raw_config": ["interface {}".format(intf_l1_i1), "ip pim drpriority 10"]
+ }
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verification: After configuring PIM related config, "
+ "verify config is present in the interface "
+ "'show ip pim interface ensxx json'"
+ )
+
+ input_dict_dr = {"l1": {"pim": {"interfaces": {intf_l1_i1: {"drPriority": 10}}}}}
+ result = verify_pim_config(tgen, input_dict_dr)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Remove 'no ip pim drpriority' on receiver interface on FRR1")
+
+ raw_config = {
+ "l1": {
+ "raw_config": ["interface {}".format(intf_l1_i1), "no ip pim drpriority 10"]
+ }
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verification: After removing the config CLI got removed "
+ "'show ip pim interface ensxx json'"
+ )
+
+ input_dict_dr = {"l1": {"pim": {"interfaces": {intf_l1_i1: {"drPriority": 1}}}}}
+ result = verify_pim_config(tgen, input_dict_dr)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify that no core is observed")
+ if tgen.routers_have_failure():
+ assert False, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure 'ip pim hello' on receiver interface on FRR1")
+
+ raw_config = {
+ "l1": {"raw_config": ["interface {}".format(intf_l1_i1), "ip pim hello 50"]}
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verification: After configuring PIM related config, "
+ "verify config is present in the interface "
+ "'show ip pim interface ensxx json'"
+ )
+
+ input_dict_dr = {"l1": {"pim": {"interfaces": {intf_l1_i1: {"helloPeriod": 50}}}}}
+ result = verify_pim_config(tgen, input_dict_dr)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Remove 'no ip pim hello' on receiver interface on FRR1")
+
+ raw_config = {
+ "l1": {"raw_config": ["interface {}".format(intf_l1_i1), "no ip pim hello"]}
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verification: After removing the config CLI got removed "
+ "'show ip pim interface ensxx json'"
+ )
+
+ input_dict_dr = {"l1": {"pim": {"interfaces": {intf_l1_i1: {"helloPeriod": 30}}}}}
+ result = verify_pim_config(tgen, input_dict_dr)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify that no core is observed")
+ if tgen.routers_have_failure():
+ assert False, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure 'ip pim unicast-bsm' on receiver interface on FRR1")
+
+ raw_config = {
+ "l1": {"raw_config": ["interface {}".format(intf_l1_i1), "ip pim unicast-bsm"]}
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Remove 'no ip pim hello' on receiver interface on FRR1")
+
+ raw_config = {
+ "l1": {
+ "raw_config": ["interface {}".format(intf_l1_i1), "no ip pim unicast-bsm"]
+ }
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify that no core is observed")
+ if tgen.routers_have_failure():
+ assert False, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_pim_dr_priority_p0(request):
+ """
+ TC_36: Verify highest DR priority become the PIM DR
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+ check_router_status(tgen)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Configure 'ip pim' on receiver interface on FRR1")
+ step("Enable PIM on all routers")
+ step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)")
+
+ input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}
+
+ for recvr, recvr_intf in input_join.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in cisco-1(f1)")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]}
+
+ for src, src_intf in input_src.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ source_i2 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+ input_dict_all = [
+ {
+ "dut": "l1",
+ "src_address": "*",
+ "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "l1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r2",
+ "src_address": "*",
+ "iif": "lo",
+ "oil": topo["routers"]["r2"]["links"]["l1"]["interface"],
+ },
+ {
+ "dut": "f1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["f1"]["links"]["i2"]["interface"],
+ "oil": topo["routers"]["f1"]["links"]["r2"]["interface"],
+ },
+ ]
+
+ for data in input_dict_all:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_all:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Configure 'ip pim drpriority 10' on receiver interface on FRR1(LHR)")
+
+ intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"]
+ raw_config = {
+ "l1": {
+ "raw_config": ["interface {}".format(intf_l1_r2), "ip pim drpriority 10"]
+ }
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "DR config is successful on FRR1 node , verify using "
+ " 'show ip pim interface json'"
+ )
+
+ input_dict_dr = {"l1": {"pim": {"interfaces": {intf_l1_r2: {"drPriority": 10}}}}}
+ result = verify_pim_config(tgen, input_dict_dr)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_all:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_all:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Configure 'ip pim drpriority 20' on receiver interface on FRR3(FHR)")
+
+ intf_f1_r2 = topo["routers"]["f1"]["links"]["r2"]["interface"]
+ raw_config = {
+ "f1": {
+ "raw_config": ["interface {}".format(intf_f1_r2), "ip pim drpriority 20"]
+ }
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "DR config is successful on FRR3 node , verify using "
+ " 'show ip pim interface json'"
+ )
+
+ input_dict_dr = {"f1": {"pim": {"interfaces": {intf_f1_r2: {"drPriority": 20}}}}}
+ result = verify_pim_config(tgen, input_dict_dr)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_all:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_all:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "PIM is enable on FRR1, FRR2 interface and neighbor is up, "
+ " verify using 'show ip pim interface'"
+ )
+
+ result = verify_pim_interface(tgen, topo, "l1")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_pim_interface(tgen, topo, "f1")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Highet IP become PIM DR , verify using "
+ "'show ip pim interface json' and 'show ip pim neighbor'"
+ )
+ step("Highest priority become PIM DR")
+
+ dr_address = topo["routers"]["l1"]["links"]["r2"]["ipv4"].split("/")[0]
+ input_dict_dr = {
+ "l1": {"pim": {"interfaces": {intf_l1_r2: {"drAddress": dr_address}}}}
+ }
+ result = verify_pim_config(tgen, input_dict_dr)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ dr_address = topo["routers"]["f1"]["links"]["r2"]["ipv4"].split("/")[0]
+ input_dict_dr = {
+ "f1": {"pim": {"interfaces": {intf_f1_r2: {"drAddress": dr_address}}}}
+ }
+ result = verify_pim_config(tgen, input_dict_dr)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Remove 'no ip pim drpriority' on receiver interface on FRR1")
+
+ raw_config = {
+ "l1": {
+ "raw_config": ["interface {}".format(intf_l1_r2), "no ip pim drpriority 10"]
+ }
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Remove 'no ip pim drpriority' on receiver interface on FRR3")
+
+ raw_config = {
+ "f1": {
+ "raw_config": ["interface {}".format(intf_f1_r2), "no ip pim drpriority 20"]
+ }
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "After removing drpriority , config got removed from both the "
+ "nodes and highest IP become PIM DR"
+ )
+
+ input_dict_dr = {"l1": {"pim": {"interfaces": {intf_l1_r2: {"drPriority": 1}}}}}
+ result = verify_pim_config(tgen, input_dict_dr)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ input_dict_dr = {"f1": {"pim": {"interfaces": {intf_f1_r2: {"drPriority": 1}}}}}
+ result = verify_pim_config(tgen, input_dict_dr)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ dr_address = topo["routers"]["r2"]["links"]["l1"]["ipv4"].split("/")[0]
+ input_dict_dr = {
+ "l1": {"pim": {"interfaces": {intf_l1_r2: {"drAddress": dr_address}}}}
+ }
+ result = verify_pim_config(tgen, input_dict_dr)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ dr_address = topo["routers"]["r2"]["links"]["f1"]["ipv4"].split("/")[0]
+ input_dict_dr = {
+ "f1": {"pim": {"interfaces": {intf_f1_r2: {"drAddress": dr_address}}}}
+ }
+ result = verify_pim_config(tgen, input_dict_dr)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_all:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_all:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_pim_hello_timer_p1(request):
+    """
+    TC_37: Verify PIM hello is sent on configured timer
+    """
+    # Flow: receiver i1 sends IGMP joins via l1, RP is configured on r2, then
+    # "ip pim hello" is set to 100 / 180 / 50 sec on the l1->r2 interface and
+    # each value is confirmed through "show ip pim interface" (helloPeriod).
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Creating configuration from JSON
+    kill_iperf(tgen)
+    clear_ip_mroute(tgen)
+    reset_config_on_routers(tgen)
+    clear_ip_pim_interface_traffic(tgen, topo)
+    check_router_status(tgen)
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Configure 'ip pim' on receiver interface on FRR1")
+    step("Enable PIM on all routers")
+    step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)")
+
+    input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}
+
+    for recvr, recvr_intf in input_join.items():
+        result = config_to_send_igmp_join_and_traffic(
+            tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
+        )
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+        result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in cisco-1(f1)")
+
+    # Static RP is r2's loopback address for the whole GROUP_RANGE.
+    input_dict = {
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+                            "/"
+                        )[0],
+                        "group_addr_range": GROUP_RANGE,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Configure PIM hello interval timer 100 on FRR1 node (FRR1-FRR2 link)")
+
+    intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"]
+    raw_config = {
+        "l1": {"raw_config": ["interface {}".format(intf_l1_r2), "ip pim hello 100"]}
+    }
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "PIM hello interval is configured on interface verify using "
+        "'show ip pim interface'"
+    )
+
+    input_dict_hello = {
+        "l1": {"pim": {"interfaces": {intf_l1_r2: {"helloPeriod": 100}}}}
+    }
+    result = verify_pim_config(tgen, input_dict_hello)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Modify hello timer to 180 and then 50sec")
+
+    intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"]
+    raw_config = {
+        "l1": {"raw_config": ["interface {}".format(intf_l1_r2), "ip pim hello 180"]}
+    }
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "PIM hello interval is configured on interface verify using "
+        "'show ip pim interface'"
+    )
+
+    input_dict_hello = {
+        "l1": {"pim": {"interfaces": {intf_l1_r2: {"helloPeriod": 180}}}}
+    }
+    result = verify_pim_config(tgen, input_dict_hello)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"]
+    raw_config = {
+        "l1": {"raw_config": ["interface {}".format(intf_l1_r2), "ip pim hello 50"]}
+    }
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "PIM hello interval is configured on interface verify using "
+        "'show ip pim interface'"
+    )
+
+    input_dict_hello = {
+        "l1": {"pim": {"interfaces": {intf_l1_r2: {"helloPeriod": 50}}}}
+    }
+    result = verify_pim_config(tgen, input_dict_hello)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify that no core is observed")
+    # NOTE(review): "result" here still holds the previous verify_pim_config
+    # value, so the failure message reuses it; the actual check is only that
+    # no router crashed (routers_have_failure()).
+    if tgen.routers_have_failure():
+        assert False, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    write_test_footer(tc_name)
+
+
+def test_mroute_after_removing_RP_sending_IGMP_prune_p2(request):
+    """
+    TC_39 Verify mroute after removing the RP and sending IGMP prune
+    """
+    # Flow: shut the cisco links to get LHR(f1)---RP(r2)---FHR(l1), start an
+    # IGMP receiver on f1 (i8) and a source behind l1 (i6), then:
+    #   1. verify (*,G)/(S,G) mroutes while the RP is configured,
+    #   2. delete the RP and check (*,G) disappears while (S,G) survives,
+    #   3. kill the receiver (simulated IGMP prune) and check groups/traffic stop,
+    #   4. re-add the RP, re-join, and verify mroutes/traffic recover.
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Creating configuration from JSON
+    kill_iperf(tgen)
+    clear_ip_mroute(tgen)
+    reset_config_on_routers(tgen)
+    clear_ip_pim_interface_traffic(tgen, topo)
+    check_router_status(tgen)
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+    step(
+        "Remove cisco connected link to simulate topo "
+        "LHR(FRR1(f1))----RP(cisco(f1)---FHR(FRR3(l1))"
+    )
+
+    intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["interface"]
+    intf_f1_c2 = topo["routers"]["f1"]["links"]["c2"]["interface"]
+    shutdown_bringup_interface(tgen, "l1", intf_l1_c1, False)
+    shutdown_bringup_interface(tgen, "f1", intf_f1_c2, False)
+
+    step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3")
+    step(
+        "Enable IGMP of FRR1 interface and send IGMP joins "
+        " from FRR1 node for group range (225.1.1.1-5)"
+    )
+
+    intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
+    input_dict = {
+        "f1": {"igmp": {"interfaces": {intf_f1_i8: {"igmp": {"version": "2"}}}}}
+    }
+    result = create_igmp_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    input_join = {"i8": topo["routers"]["i8"]["links"]["f1"]["interface"]}
+
+    for recvr, recvr_intf in input_join.items():
+        result = config_to_send_igmp_join_and_traffic(
+            tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
+        )
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+        result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Configure static RP for (225.1.1.1-5) as R2")
+
+    input_dict = {
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+                            "/"
+                        )[0],
+                        "group_addr_range": GROUP_RANGE,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Send traffic from FHR to all the groups ( 225.1.1.1 to 225.1.1.5) and send"
+        " multicast traffic"
+    )
+
+    input_src = {"i6": topo["routers"]["i6"]["links"]["l1"]["interface"]}
+
+    for src, src_intf in input_src.items():
+        result = config_to_send_igmp_join_and_traffic(
+            tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
+        )
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+        result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    source_i2 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0]
+
+    # Expected mroute state with the RP present: (S,G) on the FHR (l1) and
+    # both (*,G) and (S,G) on the LHR (f1).
+    input_dict_all = [
+        {
+            "dut": "l1",
+            "src_address": source_i2,
+            "iif": topo["routers"]["l1"]["links"]["i6"]["interface"],
+            "oil": topo["routers"]["l1"]["links"]["r2"]["interface"],
+        },
+        {
+            "dut": "f1",
+            "src_address": "*",
+            "iif": topo["routers"]["f1"]["links"]["r2"]["interface"],
+            "oil": topo["routers"]["f1"]["links"]["i8"]["interface"],
+        },
+        {
+            "dut": "f1",
+            "src_address": source_i2,
+            "iif": topo["routers"]["f1"]["links"]["r2"]["interface"],
+            "oil": topo["routers"]["f1"]["links"]["i8"]["interface"],
+        },
+    ]
+
+    step("Verify mroutes and iff upstream")
+
+    for data in input_dict_all:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_all:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Remove the RP config for both the range from all the nodes")
+
+    # Same RP entry as above but with "delete": True to unconfigure it.
+    input_dict = {
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+                            "/"
+                        )[0],
+                        "group_addr_range": GROUP_RANGE,
+                        "delete": True,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    input_dict_starg = [
+        {
+            "dut": "f1",
+            "src_address": "*",
+            "iif": topo["routers"]["f1"]["links"]["r2"]["interface"],
+            "oil": topo["routers"]["f1"]["links"]["i8"]["interface"],
+        }
+    ]
+
+    input_dict_sg = [
+        {
+            "dut": "l1",
+            "src_address": source_i2,
+            "iif": topo["routers"]["l1"]["links"]["i6"]["interface"],
+            "oil": topo["routers"]["l1"]["links"]["r2"]["interface"],
+        },
+        {
+            "dut": "f1",
+            "src_address": source_i2,
+            "iif": topo["routers"]["f1"]["links"]["r2"]["interface"],
+            "oil": topo["routers"]["f1"]["links"]["i8"]["interface"],
+        },
+    ]
+
+    # With the RP removed the (*,G) entries must be gone (expected=False),
+    # while the (S,G) entries remain intact.
+    for data in input_dict_starg:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+            expected=False,
+        )
+        assert result is not True, (
+            "Testcase {} : Failed \n "
+            "mroute still present \n Error: {}".format(tc_name, result)
+        )
+        logger.info("Expected Behaviour: {}".format(result))
+
+    for data in input_dict_sg:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Send prune from receiver-1 (using ctrl+c) on iperf interface")
+    # Killing the iperf receivers is how the harness simulates an IGMP prune.
+    kill_iperf(tgen)
+
+    # Snapshot the traffic counters before the prune takes effect so we can
+    # later check they did NOT increment.
+    intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
+    input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}}
+    traffic_before = verify_multicast_traffic(
+        tgen, input_traffic, return_traffic=True, expected=False
+    )
+    assert isinstance(traffic_before, dict), (
+        "Testcase {} : Failed \n traffic_before is not dictionary \n "
+        "Error: {}".format(tc_name, result)
+    )
+
+    step("IGMP groups are remove from FRR1 node 'show ip igmp groups'")
+
+    dut = "f1"
+    result = verify_igmp_groups(
+        tgen, dut, intf_f1_i8, IGMP_JOIN_RANGE_1, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "IGMP groups still present still present \n Error: {}".format(tc_name, result)
+    )
+    logger.info("Expected Behaviour: {}".format(result))
+
+    step(
+        "After receiving the IGMP prune from FRR1 , verify traffic "
+        "immediately stopped for this receiver 'show ip multicast'"
+    )
+
+    intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
+    input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}}
+    traffic_after = verify_multicast_traffic(
+        tgen, input_traffic, return_traffic=True, expected=False
+    )
+    assert isinstance(traffic_after, dict), (
+        "Testcase {} : Failed \n traffic_after is not dictionary \n "
+        "Error: {}".format(tc_name, result)
+    )
+
+    # Counters must NOT have incremented between the two snapshots.
+    result = verify_state_incremented(traffic_before, traffic_after)
+    assert result is not True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    logger.info("Expected Behaviour: {}".format(result))
+
+    step("Configure static RP for (225.1.1.1-5) as R2 loopback interface")
+
+    input_dict = {
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+                            "/"
+                        )[0],
+                        "group_addr_range": GROUP_RANGE,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Send IGMP joins again from LHR,check IGMP joins and starg received")
+
+    for recvr, recvr_intf in input_join.items():
+        result = config_to_send_igmp_join_and_traffic(
+            tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
+        )
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+        result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_starg:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Send traffic from FHR and verify mroute upstream")
+
+    for src, src_intf in input_src.items():
+        result = config_to_send_igmp_join_and_traffic(
+            tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
+        )
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+        result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    source_i2 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0]
+
+    for data in input_dict_sg:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    write_test_footer(tc_name)
+
+
<doc_update>
+def test_prune_sent_to_LHR_and_FHR_when_PIMnbr_down_p2(request):
+    """
+    TC_38 Verify prune is sent to LHR and FHR when PIM nbr went down
+    """
+    # Flow: build LHR(f1)---RP(r2)---FHR(l1) with sources behind l1 (i6) and
+    # f1 (i2) and a receiver on f1 (i8), then flap the RP links from three
+    # angles and verify mroute/upstream/traffic state each time:
+    #   1. shut/no-shut r2's link toward the LHR (from the RP node),
+    #   2. shut/no-shut r2's link toward the FHR (from the RP node),
+    #   3. shut/no-shut l1's link toward the RP (from the FHR node).
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Creating configuration from JSON
+    kill_iperf(tgen)
+    clear_ip_mroute(tgen)
+    reset_config_on_routers(tgen)
+    clear_ip_pim_interface_traffic(tgen, topo)
+    check_router_status(tgen)
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+    step(
+        "Remove cisco connected link to simulate topo "
+        "LHR(FRR1(f1))----RP(cisco(f1)---FHR(FRR3(l1))"
+    )
+
+    intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["interface"]
+    intf_f1_c2 = topo["routers"]["f1"]["links"]["c2"]["interface"]
+    shutdown_bringup_interface(tgen, "l1", intf_l1_c1, False)
+    shutdown_bringup_interface(tgen, "f1", intf_f1_c2, False)
+
+    step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3")
+    step(
+        "Enable IGMP of FRR1 interface and send IGMP joins "
+        " from FRR1 node for group range (225.1.1.1-5)"
+    )
+
+    intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
+    input_dict = {
+        "f1": {"igmp": {"interfaces": {intf_f1_i8: {"igmp": {"version": "2"}}}}}
+    }
+    result = create_igmp_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    input_join = {"i8": topo["routers"]["i8"]["links"]["f1"]["interface"]}
+
+    for recvr, recvr_intf in input_join.items():
+        result = config_to_send_igmp_join_and_traffic(
+            tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
+        )
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+        result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Configure static RP for (225.1.1.1-5) as R2")
+
+    input_dict = {
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+                            "/"
+                        )[0],
+                        "group_addr_range": GROUP_RANGE,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Send traffic from FHR to all the groups ( 225.1.1.1 to 225.1.1.5) and send"
+        " multicast traffic"
+    )
+
+    # Two sources: i6 behind the FHR (l1) and i2 directly attached to the LHR (f1).
+    input_src = {
+        "i6": topo["routers"]["i6"]["links"]["l1"]["interface"],
+        "i2": topo["routers"]["i2"]["links"]["f1"]["interface"],
+    }
+
+    for src, src_intf in input_src.items():
+        result = config_to_send_igmp_join_and_traffic(
+            tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
+        )
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+        result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    source_i2 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0]
+    source_i1 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+
+    input_dict_all = [
+        {
+            "dut": "l1",
+            "src_address": source_i2,
+            "iif": topo["routers"]["l1"]["links"]["i6"]["interface"],
+            "oil": topo["routers"]["l1"]["links"]["r2"]["interface"],
+        },
+        {
+            "dut": "f1",
+            "src_address": "*",
+            "iif": topo["routers"]["f1"]["links"]["r2"]["interface"],
+            "oil": topo["routers"]["f1"]["links"]["i8"]["interface"],
+        },
+        {
+            "dut": "f1",
+            "src_address": source_i1,
+            "iif": topo["routers"]["f1"]["links"]["i2"]["interface"],
+            "oil": topo["routers"]["f1"]["links"]["i8"]["interface"],
+        },
+        {
+            "dut": "f1",
+            "src_address": source_i2,
+            "iif": topo["routers"]["f1"]["links"]["r2"]["interface"],
+            "oil": topo["routers"]["f1"]["links"]["i8"]["interface"],
+        },
+    ]
+
+    step("Verify mroutes and iff upstream")
+
+    for data in input_dict_all:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_all:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    step("Verify mcast traffic received")
+    intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
+    input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}}
+
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Shut the link from LHR to RP from RP node")
+
+    intf_r2_f1 = topo["routers"]["r2"]["links"]["f1"]["interface"]
+    shutdown_bringup_interface(tgen, "r2", intf_r2_f1, False)
+
+    step("Verify RP info after Shut the link from LHR to RP from RP node")
+    dut = "f1"
+    # NOTE(review): rp_address is hard-coded; assumed to match r2's loopback
+    # from the topology JSON — confirm against the topo file.
+    rp_address = "1.0.5.17"
+    SOURCE = "Static"
+    # With the link down the RP's iif should read "Unknown" on f1.
+    result = verify_pim_rp_info(
+        tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    input_dict_starg = [
+        {
+            "dut": "f1",
+            "src_address": "*",
+            "iif": topo["routers"]["f1"]["links"]["r2"]["interface"],
+            "oil": topo["routers"]["f1"]["links"]["i8"]["interface"],
+        }
+    ]
+
+    input_dict_sg_i2 = [
+        {
+            "dut": "l1",
+            "src_address": source_i2,
+            "iif": topo["routers"]["l1"]["links"]["i6"]["interface"],
+            "oil": topo["routers"]["l1"]["links"]["r2"]["interface"],
+        },
+        {
+            "dut": "f1",
+            "src_address": source_i2,
+            "iif": topo["routers"]["f1"]["links"]["r2"]["interface"],
+            "oil": topo["routers"]["f1"]["links"]["i8"]["interface"],
+        },
+    ]
+
+    input_dict_sg_i1 = [
+        {
+            "dut": "f1",
+            "src_address": source_i1,
+            "iif": topo["routers"]["f1"]["links"]["i2"]["interface"],
+            "oil": topo["routers"]["f1"]["links"]["i8"]["interface"],
+        }
+    ]
+
+    input_dict_sg_i2_l1 = [
+        {
+            "dut": "l1",
+            "src_address": source_i2,
+            "iif": topo["routers"]["l1"]["links"]["i6"]["interface"],
+            "oil": topo["routers"]["l1"]["links"]["r2"]["interface"],
+        }
+    ]
+
+    step("Verify mroute after Shut the link from LHR to RP from RP node")
+
+    # (*,G) through the broken link must be gone; the directly-attached
+    # source's (S,G) on f1 must survive.
+    for data in input_dict_starg:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+            expected=False,
+        )
+        assert result is not True, (
+            "Testcase {} : Failed \n "
+            "mroute still present \n Error: {}".format(tc_name, result)
+        )
+        logger.info("Expected Behaviour: {}".format(result))
+
+    for data in input_dict_sg_i1:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify upstream after Shut the link from LHR to RP from RP node")
+
+    for data in input_dict_starg:
+        result = verify_upstream_iif(
+            tgen,
+            data["dut"],
+            data["iif"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            expected=False,
+        )
+        assert result is not True, (
+            "Testcase {} : Failed \n "
+            "upstream still present \n Error: {}".format(tc_name, result)
+        )
+        logger.info("Expected Behaviour: {}".format(result))
+
+    for data in input_dict_sg_i1:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("No shut the link from LHR to RP from RP node")
+
+    intf_r2_f1 = topo["routers"]["r2"]["links"]["f1"]["interface"]
+    shutdown_bringup_interface(tgen, "r2", intf_r2_f1, True)
+
+    step("Verify RP info after No shut the link from LHR to RP from RP node")
+    dut = "f1"
+    rp_address = "1.0.5.17"
+    SOURCE = "Static"
+    # After no-shut the iif must no longer be "Unknown" (hence expected=False).
+    result = verify_pim_rp_info(
+        tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "RP iif is not updated \n Error: {}".format(tc_name, result)
+    )
+    logger.info("Expected Behaviour: {}".format(result))
+
+    step("Verify mroute after No shut the link from LHR to RP from RP node")
+
+    for data in input_dict_starg:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i2:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i1:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify upstrem after No shut the link from LHR to RP from RP node")
+
+    for data in input_dict_starg:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i1:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i2:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify mcast traffic received after noshut LHR to RP from RP node")
+
+    intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
+    input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}}
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Shut the link from FHR to RP from RP node")
+
+    intf_r2_l1 = topo["routers"]["r2"]["links"]["l1"]["interface"]
+    shutdown_bringup_interface(tgen, "r2", intf_r2_l1, False)
+
+    # Stop the i2 traffic stream before checking state for this phase.
+    kill_iperf(tgen, dut="i2", action="remove_traffic")
+
+    step("Verify RP info after Shut the link from FHR to RP from RP node")
+    dut = "l1"
+    rp_address = "1.0.5.17"
+    SOURCE = "Static"
+    result = verify_pim_rp_info(
+        tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify mroute after Shut the link from FHR to RP from RP node")
+
+    for data in input_dict_starg:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i1:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify upstream after Shut the link from FHR to RP from RP node")
+
+    for data in input_dict_starg:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i1:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    # The FHR's upstream toward the RP should be gone while that link is down.
+    for data in input_dict_sg_i2_l1:
+        result = verify_upstream_iif(
+            tgen,
+            data["dut"],
+            data["iif"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            expected=False,
+        )
+        assert result is not True, "Testcase {} : Failed Error: {}".format(
+            tc_name, result
+        )
+
+    step(" No shut the link from FHR to RP from RP node")
+
+    intf_r2_l1 = topo["routers"]["r2"]["links"]["l1"]["interface"]
+    shutdown_bringup_interface(tgen, "r2", intf_r2_l1, True)
+
+    step("Verify RP info after Noshut the link from FHR to RP from RP node")
+
+    dut = "l1"
+    rp_address = "1.0.5.17"
+    SOURCE = "Static"
+    result = verify_pim_rp_info(
+        tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "RP iif is not updated \n Error: {}".format(tc_name, result)
+    )
+    logger.info("Expected Behaviour: {}".format(result))
+
+    step("Verify mroute after Noshut the link from FHR to RP from RP node")
+
+    for data in input_dict_starg:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i2:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i1:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify mroute after Noshut the link from FHR to RP from RP node")
+
+    for data in input_dict_starg:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i1:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i2:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify mcast traffic received after noshut FHR to RP from RP node")
+    intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
+    input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}}
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Shut the link from FHR to RP from FHR node")
+
+    intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"]
+    shutdown_bringup_interface(tgen, "l1", intf_l1_r2, False)
+
+    step("Verify PIM Nbrs after Shut the link from FHR to RP from FHR node")
+
+    kill_iperf(tgen, dut="i6", action="remove_traffic")
+
+    step("Verify RP info after Shut the link from FHR to RP from FHR node")
+    dut = "l1"
+    rp_address = "1.0.5.17"
+    SOURCE = "Static"
+    result = verify_pim_rp_info(
+        tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify mroute after Shut the link from FHR to RP from FHR node")
+
+    for data in input_dict_starg:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i1:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify upstream after Shut the link from FHR to RP from FHR node")
+    for data in input_dict_starg:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i1:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i2_l1:
+        result = verify_upstream_iif(
+            tgen,
+            data["dut"],
+            data["iif"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            expected=False,
+        )
+        assert result is not True, "Testcase {} : Failed Error: {}".format(
+            tc_name, result
+        )
+
+    step(" No shut the link from FHR to RP from FHR node")
+
+    intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"]
+    shutdown_bringup_interface(tgen, "l1", intf_l1_r2, True)
+
+    step("Verify RP info after No Shut the link from FHR to RP from FHR node")
+    dut = "l1"
+    rp_address = "1.0.5.17"
+    SOURCE = "Static"
+    result = verify_pim_rp_info(
+        tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE, expected=False
+    )
+    assert result is not True, (
+        "Testcase {} : Failed \n "
+        "RP iif is not updated \n Error: {}".format(tc_name, result)
+    )
+    logger.info("Expected Behaviour: {}".format(result))
+
+    step("Verify mroute after No Shut the link from FHR to RP from FHR node")
+
+    for data in input_dict_starg:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i2:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i1:
+        result = verify_ip_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify upstream after No Shut the link from FHR to RP from FHR node")
+
+    for data in input_dict_starg:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i1:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg_i2:
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify mcast traffic received after noshut FHR to RP from FHR node")
+    intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
+    input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}}
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    write_test_footer(tc_name)
+
+
+def test_mroute_flags_p1(request):
+ """
+ TC_47 Verify mroute flag in LHR and FHR node
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+ check_router_status(tgen)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ step(
+ "Remove cisco connected link to simulate topo "
+ "LHR(FRR1(f1))----RP(cisco(f1)---FHR(FRR3(l1))"
+ )
+
+ intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["interface"]
+ intf_f1_c2 = topo["routers"]["f1"]["links"]["c2"]["interface"]
+ shutdown_bringup_interface(tgen, "l1", intf_l1_c1, False)
+ shutdown_bringup_interface(tgen, "f1", intf_f1_c2, False)
+
+ step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3")
+ step(
+ "Enable IGMP of FRR1 interface and send IGMP joins "
+ " from FRR1 node for group range (225.1.1.1-5)"
+ )
+
+ intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
+ input_dict = {
+ "f1": {"igmp": {"interfaces": {intf_f1_i8: {"igmp": {"version": "2"}}}}}
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_join = {"i8": topo["routers"]["i8"]["links"]["f1"]["interface"]}
+
+ for recvr, recvr_intf in input_join.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure static RP for (225.1.1.1-5) as R2")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Send traffic from FHR to all the groups ( 225.1.1.1 to 225.1.1.5) and send"
+ " multicast traffic"
+ )
+
+ input_src = {
+ "i6": topo["routers"]["i6"]["links"]["l1"]["interface"],
+ "i2": topo["routers"]["i2"]["links"]["f1"]["interface"],
+ }
+
+ for src, src_intf in input_src.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ source_i2 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0]
+ source_i1 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
+
+ input_dict_all = [
+ {
+ "dut": "l1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["l1"]["links"]["i6"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["r2"]["interface"],
+ },
+ {
+ "dut": "f1",
+ "src_address": "*",
+ "iif": topo["routers"]["f1"]["links"]["r2"]["interface"],
+ "oil": topo["routers"]["f1"]["links"]["i8"]["interface"],
+ },
+ {
+ "dut": "f1",
+ "src_address": source_i1,
+ "iif": topo["routers"]["f1"]["links"]["i2"]["interface"],
+ "oil": topo["routers"]["f1"]["links"]["i8"]["interface"],
+ },
+ {
+ "dut": "f1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["f1"]["links"]["r2"]["interface"],
+ "oil": topo["routers"]["f1"]["links"]["i8"]["interface"],
+ },
+ ]
+
+ step("Verify mroutes and iff upstream")
+
+ for data in input_dict_all:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ dut = "f1"
+ step("verify flag for (*,G) on f1")
+ src_address = "*"
+ flag = "SC"
+ result = verify_multicast_flag_state(
+ tgen, dut, src_address, IGMP_JOIN_RANGE_1, flag
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("verify flag for (S,G) on f1 for Remote spurce ")
+ src_address = source_i2
+ flag = "ST"
+ result = verify_multicast_flag_state(
+ tgen, dut, src_address, IGMP_JOIN_RANGE_1, flag
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_verify_multicast_traffic_when_LHR_connected_to_RP_p1(request):
+ """
+ TC_11: Verify multicast traffic flowing fine, when LHR connected to RP
+ Topology used:
+ FHR(FRR3(l1))---LHR(FRR1(r2)----RP(FRR2(f1))
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+ check_router_status(tgen)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step(
+ "Remove FRR3 to cisco connected link to simulate topo "
+ "FHR(FRR3(l1))---LHR(FRR1(r2)----RP(FRR2(f1))"
+ )
+
+ intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["interface"]
+ intf_f1_c2 = topo["routers"]["f1"]["links"]["c2"]["interface"]
+ shutdown_bringup_interface(tgen, "l1", intf_l1_c1, False)
+ shutdown_bringup_interface(tgen, "f1", intf_f1_c2, False)
+
+ step("Disable IGMP config from l1")
+ input_dict_2 = {
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {"igmp": {"version": "2", "delete": True,}}
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Enable the PIM on all the interfaces of FRR1, R2 and FRR3" " routers")
+ step(
+ "Enable IGMP on FRR1(r2) interface and send IGMP join (226.1.1.1-5)"
+ " and (232.1.1.1-5)"
+ )
+
+ intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"]
+ input_dict = {
+ "r2": {"igmp": {"interfaces": {intf_r2_i3: {"igmp": {"version": "2"}}}}}
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3
+ _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3
+
+ input_join = {"i3": topo["routers"]["i3"]["links"]["r2"]["interface"]}
+
+ for recvr, recvr_intf in input_join.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in (f1)")
+
+ input_dict = {
+ "f1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["f1"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": _GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Send multicast traffic from FRR3 to 225.1.1.1-225.1.1.10" " receiver")
+
+ input_src = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}
+
+ for src, src_intf in input_src.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "'show ip mroute' showing correct RPF and OIF interface for (*,G)"
+ " and (S,G) entries on all the nodes"
+ )
+
+ source_i1 = topo["routers"]["i1"]["links"]["l1"]["ipv4"].split("/")[0]
+ input_dict_all = [
+ {
+ "dut": "l1",
+ "src_address": source_i1,
+ "iif": topo["routers"]["l1"]["links"]["i1"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["r2"]["interface"],
+ },
+ {
+ "dut": "r2",
+ "src_address": "*",
+ "iif": topo["routers"]["r2"]["links"]["f1"]["interface"],
+ "oil": topo["routers"]["r2"]["links"]["i3"]["interface"],
+ },
+ {
+ "dut": "r2",
+ "src_address": source_i1,
+ "iif": topo["routers"]["r2"]["links"]["l1"]["interface"],
+ "oil": topo["routers"]["r2"]["links"]["i3"]["interface"],
+ },
+ ]
+
+ for data in input_dict_all:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_all:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Multicast traffic is flowing for all the groups verify"
+ "using 'show ip multicast'"
+ )
+
+ intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"]
+ intf_r2_l1 = topo["routers"]["r2"]["links"]["l1"]["interface"]
+ intf_r2_f1 = topo["routers"]["r2"]["links"]["f1"]["interface"]
+ intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"]
+ intf_f1_r2 = topo["routers"]["f1"]["links"]["r2"]["interface"]
+ input_traffic = {
+ "l1": {"traffic_received": [intf_l1_i1]},
+ "r2": {"traffic_received": [intf_r2_l1], "traffic_sent": [intf_r2_i3]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut and No shut the receiver port")
+
+ intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"]
+ shutdown_bringup_interface(tgen, "r2", intf_r2_i3, False)
+
+ step(
+ "Verification: After Shut of receiver port, Verify (*,G) and "
+ "(S,G) got removed from LHR node (FRR1) using 'show ip mroute'"
+ )
+
+ input_dict_r2 = [
+ {
+ "dut": "r2",
+ "src_address": "*",
+ "iif": topo["routers"]["r2"]["links"]["f1"]["interface"],
+ "oil": topo["routers"]["r2"]["links"]["i3"]["interface"],
+ },
+ {
+ "dut": "r2",
+ "src_address": source_i1,
+ "iif": topo["routers"]["r2"]["links"]["l1"]["interface"],
+ "oil": topo["routers"]["r2"]["links"]["i3"]["interface"],
+ },
+ ]
+
+ for data in input_dict_r2:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n"
+ " Expected Behaviour: mroutes are cleared \n Error: {}".format(
+ tc_name, result
+ )
+ )
+ logger.info("Expected Behaviour: {}".format(result))
+
+ shutdown_bringup_interface(tgen, "r2", intf_r2_i3, True)
+
+ step(
+ "Verification: After No shut of receiver port , Verify (*,G)"
+ " and (S,G) got populated on LHR node (FRR1) using "
+ "'show ip mroute' 'show ip pim upstream'"
+ )
+
+ for data in input_dict_r2:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_r2:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Multicast traffic is resumed for all the groups verify "
+ "using 'show ip multicast'"
+ )
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut and No shut the source port")
+
+ intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"]
+ shutdown_bringup_interface(tgen, "l1", intf_l1_i1, False)
+
+ step(
+ "Verification: After Shut of source port, Verify (*,G) and "
+ "(S,G) got removed from LHR node (FRR1) using 'show ip mroute'"
+ )
+
+ input_dict_l1 = [
+ {
+ "dut": "l1",
+ "src_address": source_i1,
+ "iif": topo["routers"]["l1"]["links"]["i1"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["r2"]["interface"],
+ }
+ ]
+
+ for data in input_dict_l1:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n"
+ "mroutes are cleared \n Error: {}".format(tc_name, result)
+ )
+ logger.info("Expected Behaviour: {}".format(result))
+
+ shutdown_bringup_interface(tgen, "l1", intf_l1_i1, True)
+
+ step(
+ "Verification: After No shut of source port , Verify (*,G)"
+ " and (S,G) got populated on LHR node (FRR1) using "
+ "'show ip mroute' 'show ip pim upstream'"
+ )
+
+ for data in input_dict_l1:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_l1:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Multicast traffic is resumed for all the groups verify "
+ "using 'show ip multicast'"
+ )
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut and No shut of LHR to cisco port from LHR side")
+
+ intf_r2_f1 = topo["routers"]["r2"]["links"]["f1"]["interface"]
+ shutdown_bringup_interface(tgen, "r2", intf_r2_f1, False)
+
+ step(
+ "Verification: After Shut of source port, Verify (S,G) got "
+ "removed from LHR and FHR using 'show ip mroute'"
+ )
+
+ input_dict_r2_f1 = [
+ {
+ "dut": "r2",
+ "src_address": "*",
+ "iif": topo["routers"]["r2"]["links"]["f1"]["interface"],
+ "oil": topo["routers"]["r2"]["links"]["i3"]["interface"],
+ },
+ {
+ "dut": "f1",
+ "src_address": "*",
+ "iif": "lo",
+ "oil": topo["routers"]["f1"]["links"]["r2"]["interface"],
+ },
+ ]
+
+ for data in input_dict_r2_f1:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n"
+ " mroutes are cleared \n Error: {}".format(tc_name, result)
+ )
+ logger.info("Expected Behaviour: {}".format(result))
+
+ shutdown_bringup_interface(tgen, "r2", intf_r2_f1, True)
+
+ step(
+ "Verification: After No shut of source port , Verify (*,G)"
+ " and (S,G) got populated on LHR node (FRR1) using "
+ "'show ip mroute' 'show ip pim upstream'"
+ )
+
+ for data in input_dict_all:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_all:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Multicast traffic is resumed for all the groups verify "
+ "using 'show ip multicast'"
+ )
+
+ input_traffic_r2 = {
+ "r2": {"traffic_received": [intf_r2_l1], "traffic_sent": [intf_r2_i3]}
+ }
+ result = verify_multicast_traffic(tgen, input_traffic_r2)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut and no shut of FHR to LHR port from FHR side")
+
+ intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"]
+ shutdown_bringup_interface(tgen, "l1", intf_l1_r2, False)
+
+ step(
+ "Verification: After Shut of LHR to FHR port, Verify (S,G)"
+ "got removed from LHR 'show ip mroute'"
+ )
+
+ dut = "r2"
+ src_address = "*"
+ iif = topo["routers"]["r2"]["links"]["f1"]["interface"]
+ oil = topo["routers"]["r2"]["links"]["i3"]["interface"]
+
+ result = verify_ip_mroutes(tgen, dut, src_address, _IGMP_JOIN_RANGE, iif, oil)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ src_address = source_i1
+ iif = topo["routers"]["r2"]["links"]["l1"]["interface"]
+ oil = topo["routers"]["r2"]["links"]["i3"]["interface"]
+
+ result = verify_ip_mroutes(
+ tgen, dut, src_address, _IGMP_JOIN_RANGE, iif, oil, expected=False
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n"
+ " mroutes are cleared \n Error: {}".format(tc_name, result)
+ )
+ logger.info("Expected Behaviour: {}".format(result))
+
+ shutdown_bringup_interface(tgen, "l1", intf_l1_r2, True)
+
+ step(
+ "Verification: After No shut of source port , Verify (*,G)"
+ " and (S,G) got populated on LHR node (FRR1) using "
+ "'show ip mroute' 'show ip pim upstream'"
+ )
+
+ for data in input_dict_all:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_all:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Multicast traffic is resumed for all the groups verify "
+ "using 'show ip multicast'"
+ )
+
+ result = verify_multicast_traffic(tgen, input_traffic_r2)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_verify_multicast_traffic_when_FHR_connected_to_RP_p1(request):
+ """
+ TC_12: Verify multicast traffic is flowing fine when FHR is connected to RP
+ Topology used:
+ LHR(FRR1)---FHR(FRR3)----RP(FRR2)
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+ check_router_status(tgen)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step(
+ "Remove FRR3 to FRR2 connected link to simulate topo "
+ "FHR(FRR3)---LHR(FRR1)----RP(FFR2)"
+ )
+
+ intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["interface"]
+ intf_f1_c2 = topo["routers"]["f1"]["links"]["c2"]["interface"]
+ shutdown_bringup_interface(tgen, "l1", intf_l1_c1, False)
+ shutdown_bringup_interface(tgen, "f1", intf_f1_c2, False)
+
+ step("Enable the PIM on all the interfaces of FRR1, R2 and FRR3" " routers")
+ step("Enable IGMP on FRR1(l1) interface and send IGMP join " " and (225.1.1.1-5)")
+
+ _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3
+ _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3
+
+ input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}
+
+ for recvr, recvr_intf in input_join.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP for (225.1.1.1-5) in (f1)")
+
+ input_dict = {
+ "f1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["f1"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": _GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Send multicast traffic from FRR3(r2) to 225.1.1.1-225.1.1.10" " receiver")
+
+ input_src = {"i3": topo["routers"]["i3"]["links"]["r2"]["interface"]}
+
+ for src, src_intf in input_src.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "'show ip mroute' showing correct RPF and OIF interface for (*,G)"
+ " and (S,G) entries on all the nodes"
+ )
+
+ source_i3 = topo["routers"]["i3"]["links"]["r2"]["ipv4"].split("/")[0]
+ input_dict_all = [
+ {
+ "dut": "l1",
+ "src_address": "*",
+ "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "l1",
+ "src_address": source_i3,
+ "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r2",
+ "src_address": "*",
+ "iif": topo["routers"]["r2"]["links"]["f1"]["interface"],
+ "oil": topo["routers"]["r2"]["links"]["l1"]["interface"],
+ },
+ {
+ "dut": "r2",
+ "src_address": source_i3,
+ "iif": topo["routers"]["r2"]["links"]["i3"]["interface"],
+ "oil": topo["routers"]["r2"]["links"]["l1"]["interface"],
+ },
+ ]
+
+ for data in input_dict_all:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_all:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"]
+ intf_f1_r2 = topo["routers"]["f1"]["links"]["r2"]["interface"]
+ intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "l1": {"traffic_received": [intf_l1_r2], "traffic_sent": [intf_l1_i1]}
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut the receiver(l1) port in 1 min interval")
+
+ shutdown_bringup_interface(tgen, "l1", intf_l1_i1, False)
+
+ step(
+ "Verification: After Shut of receiver port, Verify (*,G) and "
+ "(S,G) got removed from LHR node (FRR1) using 'show ip mroute'"
+ )
+
+ input_dict_l1 = [
+ {
+ "dut": "l1",
+ "src_address": source_i3,
+ "iif": topo["routers"]["l1"]["links"]["r2"]["interface"],
+ "oil": topo["routers"]["l1"]["links"]["i1"]["interface"],
+ }
+ ]
+
+ for data in input_dict_l1:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n"
+ " mroutes are cleared \n Error: {}".format(tc_name, result)
+ )
+ logger.info("Expected Behaviour: {}".format(result))
+
+ step("No shut the receiver(l1) port in 1 min interval")
+
+ shutdown_bringup_interface(tgen, "l1", intf_l1_i1, True)
+
+ step(
+ "Verification: After No shut of receiver port , Verify (*,G)"
+ " and (S,G) got populated on LHR node (FRR1) using "
+ "'show ip mroute' 'show ip pim upstream'"
+ )
+
+ for data in input_dict_l1:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_l1:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut the source(r2) port in 1 min interval")
+
+ intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"]
+ shutdown_bringup_interface(tgen, "r2", intf_r2_i3, False)
+
+ step(
+ "Verification: After Shut of source port, Verify (S,G) got "
+ "removed from FHR using 'show ip mroute'"
+ )
+
+ input_dict_r2 = [
+ {
+ "dut": "r2",
+ "src_address": source_i3,
+ "iif": topo["routers"]["r2"]["links"]["i3"]["interface"],
+ "oil": topo["routers"]["r2"]["links"]["l1"]["interface"],
+ }
+ ]
+
+ for data in input_dict_r2:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n"
+ " mroutes are cleared \n Error: {}".format(tc_name, result)
+ )
+ logger.info("Expected Behaviour: {}".format(result))
+
+ step("No shut the source(r2) port in 1 min interval")
+
+ shutdown_bringup_interface(tgen, "r2", intf_r2_i3, True)
+
+ step(
+ "Verification: After No shut of source port , Verify (*,G)"
+ " and (S,G) got populated on LHR and FHR using "
+ "'show ip mroute' 'show ip pim upstream'"
+ )
+
+ for data in input_dict_r2:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_r2:
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut FHR to RP port from FHR side")
+
+ intf_r2_f1 = topo["routers"]["r2"]["links"]["f1"]["interface"]
+ shutdown_bringup_interface(tgen, "r2", intf_r2_f1, False)
+
+ step(
+ "Verification: After Shut of FHR to cisco port, Verify (*,G) "
+ "got removed from FHR and cisco node using 'show ip mroute'"
+ )
+
+ input_dict_all_star = [
+ {
+ "dut": "r2",
+ "src_address": "*",
+ "iif": topo["routers"]["r2"]["links"]["f1"]["interface"],
+ "oil": topo["routers"]["r2"]["links"]["l1"]["interface"],
+ },
+ {
+ "dut": "f1",
+ "src_address": "*",
+ "iif": "lo",
+ "oil": topo["routers"]["f1"]["links"]["r2"]["interface"],
+ },
+ ]
+
+ for data in input_dict_all_star:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n"
+ " mroutes are cleared \n Error: {}".format(tc_name, result)
+ )
+ logger.info("Expected Behaviour: {}".format(result))
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo4.py b/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo4.py
new file mode 100755
index 0000000000..e8579e2a1e
--- /dev/null
+++ b/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo4.py
@@ -0,0 +1,1122 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2020 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test multicast pim sm:
+
+Test steps
+- Create topology (setup module)
+- Bring up topology
+
+Following tests are covered:
+
+1. TC:48 Verify mroute after configuring black-hole route for RP and source
+2. TC:49 Verify mroute when RP is reachable using default route
+3. TC:50 Verify mroute when LHR,FHR,RP and transit routers reachable
+ using default routes
+4. TC:52 Verify PIM nbr after changing interface ip
+5. TC:53 Verify IGMP interface updated with correct detail after changing interface config
+6. TC:54 Verify received and transmit hello stats are getting cleared after PIM nbr reset
+
+
+"""
+
+import os
+import re
+import sys
+import json
+import time
+import datetime
+from time import sleep
+import pytest
+
+pytestmark = pytest.mark.pimd
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ step,
+ iperfSendIGMPJoin,
+ addKernelRoute,
+ reset_config_on_routers,
+ iperfSendTraffic,
+ kill_iperf,
+ shutdown_bringup_interface,
+ start_router,
+ stop_router,
+ apply_raw_config,
+ create_static_routes,
+ required_linux_kernel_version,
+ topo_daemons,
+)
+from lib.pim import (
+ create_pim_config,
+ create_igmp_config,
+ verify_igmp_groups,
+ verify_ip_mroutes,
+ clear_ip_pim_interface_traffic,
+ verify_igmp_config,
+ verify_pim_neighbors,
+ verify_pim_config,
+ verify_pim_interface,
+ verify_upstream_iif,
+ clear_ip_mroute,
+ verify_multicast_traffic,
+ verify_pim_rp_info,
+ verify_pim_interface_traffic,
+ verify_igmp_interface,
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/multicast_pim_sm_topo4.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+TOPOLOGY = """
+
+
+ i4-----c1-------------c2---i5
+ | |
+ | |
+ i1-----l1------r2-----f1---i2
+ | | | |
+ | | | |
+ i7 i6 i3 i8
+
+ Description:
+ i1, i2, i3. i4, i5, i6, i7, i8 - FRR running iperf to send IGMP
+ join and traffic
+ l1 - LHR
+ f1 - FHR
+ r2 - FRR router
+ c1 - FRR router
+ c2 - FRR router
+"""
+
+# Global variables
+
+GROUP_RANGE = "224.0.0.0/4"
+IGMP_GROUP = "225.1.1.1/32"
+IGMP_JOIN = "225.1.1.1"
+GROUP_RANGE_1 = [
+ "225.1.1.1/32",
+ "225.1.1.2/32",
+ "225.1.1.3/32",
+ "225.1.1.4/32",
+ "225.1.1.5/32",
+]
+IGMP_JOIN_RANGE_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"]
+NEW_ADDRESS_1 = "192.168.20.1"
+NEW_ADDRESS_2 = "192.168.20.2"
+NEW_ADDRESS_1_SUBNET = "192.168.20.1/24"
+NEW_ADDRESS_2_SUBNET = "192.168.20.2/24"
+
+
+class CreateTopo(Topo):
+ """
+ Test BasicTopo - topology 1
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function"""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("4.19")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+ logger.info("Master Topology: \n {}".format(TOPOLOGY))
+
+ logger.info("Running setup_module to create topology")
+
+ tgen = Topogen(CreateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # get list of daemons needs to be started for this suite.
+ daemons = topo_daemons(tgen, topo)
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen, daemons)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and Remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, iperf, iperf_intf, GROUP_RANGE, join=False, traffic=False
+):
+ """
+ API to do pre-configuration to send IGMP join and multicast
+ traffic
+
+ parameters:
+ -----------
+ * `tgen`: topogen object
+ * `topo`: input json data
+ * `tc_name`: caller test case name
+ * `iperf`: router running iperf
+ * `iperf_intf`: interface name router running iperf
+ * `GROUP_RANGE`: group range
+ * `join`: IGMP join, default False
+ * `traffic`: multicast traffic, default False
+ """
+
+ if join:
+ # Add route to kernel
+ result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ if traffic:
+ # Add route to kernel
+ result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ router_list = tgen.routers()
+ for router in router_list.keys():
+ if router == iperf:
+ continue
+
+ rnode = router_list[router]
+ rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter")
+
+ for router in topo["routers"].keys():
+ if "static_routes" in topo["routers"][router]:
+ static_routes = topo["routers"][router]["static_routes"]
+ for static_route in static_routes:
+ network = static_route["network"]
+ next_hop = static_route["next_hop"]
+ if type(network) is not list:
+ network = [network]
+
+ return True
+
+
+def verify_state_incremented(state_before, state_after):
+ """
+ API to compare interface traffic state incrementing
+
+ Parameters
+ ----------
+ * `state_before` : State dictionary for any particular instance
+ * `state_after` : State dictionary for any particular instance
+ """
+
+ for router, state_data in state_before.items():
+ for state, value in state_data.items():
+ if state_before[router][state] >= state_after[router][state]:
+ errormsg = (
+ "[DUT: %s]: state %s value has not"
+ " incremented, Initial value: %s, "
+ "Current value: %s [FAILED!!]"
+ % (
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT: %s]: State %s value is "
+ "incremented, Initial value: %s, Current value: %s"
+ " [PASSED!!]",
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+
+ return True
+
+
+def test_mroute_when_RP_reachable_default_route_p2(request):
+ """
+ TC_49 Verify mroute when RP and source are reachable using default route
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ step(
+ "Remove c1-c2 connected link to simulate topo "
+ "c1(FHR)---l1(RP)----r2---f1-----c2(LHR)"
+ )
+
+ intf_c1_c2 = topo["routers"]["c1"]["links"]["c2"]["interface"]
+ intf_c2_c1 = topo["routers"]["c2"]["links"]["c1"]["interface"]
+ shutdown_bringup_interface(tgen, "c1", intf_c1_c2, False)
+ shutdown_bringup_interface(tgen, "c2", intf_c2_c1, False)
+
+ step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3")
+ step(
+ "Enable IGMP of FRR1 interface and send IGMP joins "
+ " from FRR1 node for group range (225.1.1.1-5)"
+ )
+
+ intf_c2_i5 = topo["routers"]["c2"]["links"]["i5"]["interface"]
+ input_dict = {
+ "c2": {"igmp": {"interfaces": {intf_c2_i5: {"igmp": {"version": "2"}}}}}
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_join = {"i5": topo["routers"]["i5"]["links"]["c2"]["interface"]}
+
+ for recvr, recvr_intf in input_join.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure static RP for (225.1.1.1-5) as R2")
+
+ input_dict = {
+ "l1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["l1"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Send traffic from C1 to all the groups ( 225.1.1.1 to 225.1.1.5)")
+
+ input_src = {"i4": topo["routers"]["i4"]["links"]["c1"]["interface"]}
+
+ for src, src_intf in input_src.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ source_i4 = topo["routers"]["i4"]["links"]["c1"]["ipv4"].split("/")[0]
+
+ input_dict_starg = [
+ {
+ "dut": "c2",
+ "src_address": "*",
+ "iif": topo["routers"]["c2"]["links"]["f1"]["interface"],
+ "oil": topo["routers"]["c2"]["links"]["i5"]["interface"],
+ }
+ ]
+
+ input_dict_sg = [
+ {
+ "dut": "c2",
+ "src_address": source_i4,
+ "iif": topo["routers"]["c2"]["links"]["f1"]["interface"],
+ "oil": topo["routers"]["c2"]["links"]["i5"]["interface"],
+ }
+ ]
+
+ step("Verify mroutes and iff upstream")
+
+ for data in input_dict_sg:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_starg:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Delete static routes on c2")
+ input_dict = {
+ "c2": {
+ "static_routes": [
+ {
+ "network": ["1.0.4.11/32", "10.0.2.1/24", "10.0.1.2/24"],
+ "next_hop": "10.0.3.2",
+ "delete": True,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ step("Verify RP info unknown after removing static route from c2 ")
+ dut = "c2"
+ rp_address = topo["routers"]["l1"]["links"]["lo"]["ipv4"].split("/")[0]
+ SOURCE = "Static"
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify mroute not present after Delete of static routes on c1")
+
+ for data in input_dict_sg:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ for data in input_dict_starg:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure default routes on c2")
+
+ intf_f1_c2 = topo["routers"]["f1"]["links"]["c2"]["ipv4"].split("/")[0]
+
+ input_dict = {
+ "c2": {"static_routes": [{"network": "0.0.0.0/0", "next_hop": intf_f1_c2}]}
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ step("applying ip nht config on c2")
+
+ raw_config = {"c2": {"raw_config": ["ip nht resolve-via-default"]}}
+
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify RP info is NOT unknown after removing static route from c2 ")
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify (s,g) populated after adding default route ")
+
+ for data in input_dict_sg:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify (*,g) populated after adding default route ")
+
+ for data in input_dict_starg:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_mroute_with_RP_default_route_all_nodes_p2(request):
+ """
+ TC_50 Verify mroute when LHR,FHR,RP and transit routers reachable
+ using default routes
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ step(
+ "Remove c1-c2 connected link to simulate topo "
+ "c1(LHR)---l1(RP)----r2---f1-----c2(FHR)"
+ )
+
+ intf_c1_c2 = topo["routers"]["c1"]["links"]["c2"]["interface"]
+ intf_c2_c1 = topo["routers"]["c2"]["links"]["c1"]["interface"]
+ shutdown_bringup_interface(tgen, "c1", intf_c1_c2, False)
+ shutdown_bringup_interface(tgen, "c2", intf_c2_c1, False)
+
+ step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3")
+ step(
+ "Enable IGMP of FRR1 interface and send IGMP joins "
+ " from FRR1 node for group range (225.1.1.1-5)"
+ )
+
+ intf_c1_i4 = topo["routers"]["c1"]["links"]["i4"]["interface"]
+ input_dict = {
+ "c1": {"igmp": {"interfaces": {intf_c1_i4: {"igmp": {"version": "2"}}}}}
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_join = {"i4": topo["routers"]["i4"]["links"]["c1"]["interface"]}
+
+ for recvr, recvr_intf in input_join.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure static RP for (225.1.1.1-5) as R2")
+
+ input_dict = {
+ "l1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["l1"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Send traffic from C2 to all the groups ( 225.1.1.1 to 225.1.1.5)")
+
+ input_src = {"i5": topo["routers"]["i5"]["links"]["c2"]["interface"]}
+
+ for src, src_intf in input_src.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ source_i5 = topo["routers"]["i5"]["links"]["c2"]["ipv4"].split("/")[0]
+
+ input_dict_starg = [
+ {
+ "dut": "c1",
+ "src_address": "*",
+ "iif": topo["routers"]["c1"]["links"]["l1"]["interface"],
+ "oil": topo["routers"]["c1"]["links"]["i4"]["interface"],
+ }
+ ]
+
+ input_dict_sg = [
+ {
+ "dut": "c1",
+ "src_address": source_i5,
+ "iif": topo["routers"]["c1"]["links"]["l1"]["interface"],
+ "oil": topo["routers"]["c1"]["links"]["i4"]["interface"],
+ }
+ ]
+
+ step("Verify mroutes and iff upstream")
+
+ for data in input_dict_sg:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_starg:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Delete static routes RP on all the nodes")
+ input_dict = {
+ "c2": {
+ "static_routes": [
+ {"network": ["1.0.4.11/32"], "next_hop": "10.0.3.2", "delete": True}
+ ]
+ },
+ "c1": {
+ "static_routes": [
+ {"network": ["1.0.4.11/32"], "next_hop": "10.0.2.2", "delete": True}
+ ]
+ },
+ "r2": {
+ "static_routes": [
+ {"network": ["1.0.4.11/32"], "next_hop": "10.0.12.1", "delete": True}
+ ]
+ },
+ "f1": {
+ "static_routes": [
+ {"network": ["1.0.4.11/32"], "next_hop": "10.0.7.2", "delete": True}
+ ]
+ },
+ }
+
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ step("Verify RP info unknown after removing static route from c2 ")
+ dut = "c2"
+ rp_address = topo["routers"]["l1"]["links"]["lo"]["ipv4"].split("/")[0]
+ SOURCE = "Static"
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ for data in input_dict_starg:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure default routes on all the nodes")
+
+ intf_f1_c2 = topo["routers"]["f1"]["links"]["c2"]["ipv4"].split("/")[0]
+ intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["ipv4"].split("/")[0]
+ intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["ipv4"].split("/")[0]
+ intf_r2_f1 = topo["routers"]["r2"]["links"]["f1"]["ipv4"].split("/")[0]
+
+ input_dict = {
+ "c1": {"static_routes": [{"network": "0.0.0.0/0", "next_hop": intf_l1_c1}]},
+ "c2": {"static_routes": [{"network": "0.0.0.0/0", "next_hop": intf_f1_c2}]},
+ "r2": {"static_routes": [{"network": "0.0.0.0/0", "next_hop": intf_l1_r2}]},
+ "f1": {"static_routes": [{"network": "0.0.0.0/0", "next_hop": intf_r2_f1}]},
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ step("applying ip nht config on c2")
+
+ raw_config = {
+ "c1": {"raw_config": ["ip nht resolve-via-default"]},
+ "c2": {"raw_config": ["ip nht resolve-via-default"]},
+ "r2": {"raw_config": ["ip nht resolve-via-default"]},
+ "f1": {"raw_config": ["ip nht resolve-via-default"]},
+ "l1": {"raw_config": ["ip nht resolve-via-default"]},
+ }
+
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify RP info Not unknown after removing static route from c2 ")
+ dut = "c2"
+ step("Verify RP info is NOT unknown after removing static route from c2 ")
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify (s,g) populated after adding default route ")
+
+ for data in input_dict_sg:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify (*,g) populated after adding default route ")
+
+ for data in input_dict_starg:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_PIM_hello_tx_rx_p1(request):
+ """
+ TC_54 Verify received and transmit hello stats
+ are getting cleared after PIM nbr reset
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ step(
+ "Remove c1-c2 connected link to simulate topo "
+ "c1(LHR)---l1(RP)----r2---f1-----c2(FHR)"
+ )
+
+ intf_c1_c2 = topo["routers"]["c1"]["links"]["c2"]["interface"]
+ intf_c2_c1 = topo["routers"]["c2"]["links"]["c1"]["interface"]
+ shutdown_bringup_interface(tgen, "c1", intf_c1_c2, False)
+ shutdown_bringup_interface(tgen, "c2", intf_c2_c1, False)
+
+ step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3")
+ step(
+ "Enable IGMP of FRR1 interface and send IGMP joins "
+ " from FRR1 node for group range (225.1.1.1-5)"
+ )
+
+ intf_c1_i4 = topo["routers"]["c1"]["links"]["i4"]["interface"]
+ input_dict = {
+ "c1": {"igmp": {"interfaces": {intf_c1_i4: {"igmp": {"version": "2"}}}}}
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_join = {"i4": topo["routers"]["i4"]["links"]["c1"]["interface"]}
+
+ for recvr, recvr_intf in input_join.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure static RP for (225.1.1.1-5) as R2")
+
+ input_dict = {
+ "l1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["l1"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Send Mcast traffic from C2 to all the groups ( 225.1.1.1 to 225.1.1.5)")
+
+ input_src = {"i5": topo["routers"]["i5"]["links"]["c2"]["interface"]}
+
+ for src, src_intf in input_src.items():
+ result = config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ source_i5 = topo["routers"]["i5"]["links"]["c2"]["ipv4"].split("/")[0]
+
+ input_dict_starg = [
+ {
+ "dut": "c1",
+ "src_address": "*",
+ "iif": topo["routers"]["c1"]["links"]["l1"]["interface"],
+ "oil": topo["routers"]["c1"]["links"]["i4"]["interface"],
+ }
+ ]
+
+ input_dict_sg = [
+ {
+ "dut": "c1",
+ "src_address": source_i5,
+ "iif": topo["routers"]["c1"]["links"]["l1"]["interface"],
+ "oil": topo["routers"]["c1"]["links"]["i4"]["interface"],
+ }
+ ]
+
+ step("(*,G) and (S,G) created on f1 and node verify using 'show ip mroute'")
+ for data in input_dict_sg:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_starg:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["interface"]
+ intf_c1_l1 = topo["routers"]["c1"]["links"]["l1"]["interface"]
+
+ step("verify before stats on C1")
+ state_dict = {"c1": {intf_c1_l1: ["helloTx", "helloRx"],}}
+
+ c1_state_before = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ c1_state_before, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ step("Flap PIM nbr while doing interface c1-l1 interface shut from f1 side")
+ shutdown_bringup_interface(tgen, "c1", intf_c1_l1, False)
+
+ step(
+ "After shut of local interface from c1 , verify rx/tx hello counters are cleared on c1 side"
+ "verify using 'show ip pim interface traffic'"
+ )
+ shutdown_bringup_interface(tgen, "c1", intf_c1_l1, True)
+
+ step("verify stats after on c1")
+ c1_state_after = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ c1_state_after, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ step("verify stats not increamented on c1")
+ result = verify_state_incremented(c1_state_before, c1_state_after)
+ assert (
+ result is not True
+ ), "Testcase{} : Failed Error: {}" "stats incremented".format(tc_name, result)
+
+ step("verify before stats on l1")
+ l1_state_dict = {"l1": {intf_l1_c1: ["helloTx", "helloRx"],}}
+
+ l1_state_before = verify_pim_interface_traffic(tgen, l1_state_dict)
+ assert isinstance(
+ l1_state_before, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ step("Flap PIM nbr while doing interface r2-c1 shut from r2 side")
+ shutdown_bringup_interface(tgen, "l1", intf_l1_c1, False)
+
+ step(
+ "After shut the interface from r2 side , verify r2 side rx and tx of hello"
+ "counters are resetted show ip pim interface traffic"
+ )
+ shutdown_bringup_interface(tgen, "l1", intf_l1_c1, True)
+
+ step("verify stats after on l1")
+ l1_state_after = verify_pim_interface_traffic(tgen, l1_state_dict)
+ assert isinstance(
+ l1_state_after, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ step("verify stats not increamented on l1")
+ result = verify_state_incremented(l1_state_before, l1_state_after)
+ assert (
+ result is not True
+ ), "Testcase{} : Failed Error: {}" "stats incremented".format(tc_name, result)
+
+ step("Reinit the dict")
+ c1_state_before = {}
+ l1_state_before = {}
+ c1_state_after = {}
+ l1_state_after = {}
+
+ step("verify before stats on C1")
+ state_dict = {"c1": {intf_c1_l1: ["helloTx", "helloRx"],}}
+
+ c1_state_before = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ c1_state_before, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ step("Flap c1-r2 pim nbr while changing ip address from c1 side")
+ c1_l1_ip_subnet = topo["routers"]["c1"]["links"]["l1"]["ipv4"]
+
+ raw_config = {
+ "c1": {
+ "raw_config": [
+ "interface {}".format(intf_c1_l1),
+ "no ip address {}".format(c1_l1_ip_subnet),
+ "ip address {}".format(NEW_ADDRESS_2_SUBNET),
+ ]
+ }
+ }
+
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("verify stats after on c1")
+ c1_state_after = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ c1_state_after, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ step("verify stats not increamented on c1")
+ result = verify_state_incremented(c1_state_before, c1_state_after)
+ assert (
+ result is not True
+ ), "Testcase{} : Failed Error: {}" "stats incremented".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/multicast-pim-static-rp-topo1/__init__.py b/tests/topotests/multicast-pim-static-rp-topo1/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/multicast-pim-static-rp-topo1/__init__.py
diff --git a/tests/topotests/multicast-pim-static-rp-topo1/multicast_pim_static_rp.json b/tests/topotests/multicast-pim-static-rp-topo1/multicast_pim_static_rp.json
new file mode 100644
index 0000000000..6d6c047b00
--- /dev/null
+++ b/tests/topotests/multicast-pim-static-rp-topo1/multicast_pim_static_rp.json
@@ -0,0 +1,93 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32},
+ "routers": {
+ "r0": {"links": {"r1": {"ipv4": "auto"}}},
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r0": {"ipv4": "auto", "pim": "enable"},
+ "r2": {"ipv4": "auto", "pim": "enable"},
+ "r3": {"ipv4": "auto", "pim": "enable"},
+ "r4": {"ipv4": "auto", "pim": "enable"}
+ },
+ "pim": {
+ "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": ["224.0.0.0/4"]}]
+ },
+ "igmp": {"interfaces": {"r1-r0-eth0": {"igmp": {"version": "2"}}}},
+ "static_routes": [
+ {"network": "10.0.4.0/24", "next_hop": "10.0.2.2"},
+ {"network": "10.0.5.0/24", "next_hop": "10.0.2.2"},
+ {"network": "10.0.6.0/24", "next_hop": "10.0.2.2", "admin_distance": 1},
+ {"network": "10.0.6.0/24", "next_hop": "10.0.1.2", "admin_distance": 2},
+ {"network": "1.0.2.17/32", "next_hop": "10.0.1.2", "admin_distance": 1},
+ {"network": "1.0.2.17/32", "next_hop": "10.0.2.2", "admin_distance": 2},
+ {"network": "1.0.3.17/32", "next_hop": "10.0.2.2"},
+ {"network": "1.0.4.17/32", "next_hop": "10.0.3.2"}
+ ]
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r1": {"ipv4": "auto", "pim": "enable"},
+ "r3": {"ipv4": "auto", "pim": "enable"}
+ },
+ "pim": {
+ "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": ["224.0.0.0/4"]}]
+ },
+ "static_routes": [
+ {"network": "10.0.0.0/24", "next_hop": "10.0.1.1"},
+ {"network": "10.0.2.0/24", "next_hop": "10.0.1.1"},
+ {"network": "10.0.3.0/24", "next_hop": "10.0.1.1"},
+ {"network": "10.0.5.0/24", "next_hop": "10.0.4.2"},
+ {"network": "10.0.6.0/24", "next_hop": "10.0.4.2"},
+ {"network": "1.0.1.17/32", "next_hop": "10.0.1.1"},
+ {"network": "1.0.3.17/32", "next_hop": "10.0.4.2"},
+ {"network": "1.0.4.17/32", "next_hop": "10.0.1.1"}
+ ]
+ },
+ "r3": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r1": {"ipv4": "auto", "pim": "enable"},
+ "r2": {"ipv4": "auto", "pim": "enable"},
+ "r4": {"ipv4": "auto", "pim": "enable"},
+ "r5": {"ipv4": "auto", "pim": "enable"}
+ },
+ "pim": {
+ "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": ["224.0.0.0/4"]}]
+ },
+ "static_routes": [
+ {"network": "10.0.0.0/24", "next_hop": "10.0.2.1"},
+ {"network": "10.0.1.0/24", "next_hop": "10.0.2.1"},
+ {"network": "10.0.3.0/24", "next_hop": "10.0.2.1"},
+ {"network": "1.0.1.17/32", "next_hop": "10.0.2.1"},
+ {"network": "1.0.2.17/32", "next_hop": "10.0.4.1"},
+ {"network": "1.0.4.17/32", "next_hop": "10.0.5.2"}
+ ]
+ },
+ "r4": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r1": {"ipv4": "auto", "pim": "enable"},
+ "r3": {"ipv4": "auto", "pim": "enable"}
+ },
+ "pim": {
+ "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": ["224.0.0.0/4"]}]
+ },
+ "static_routes": [
+ {"network": "10.0.0.0/24", "next_hop": "10.0.3.1"},
+ {"network": "10.0.1.0/24", "next_hop": "10.0.3.1"},
+ {"network": "10.0.2.0/24", "next_hop": "10.0.3.1"},
+ {"network": "10.0.4.0/24", "next_hop": "10.0.5.1"},
+ {"network": "10.0.6.0/24", "next_hop": "10.0.5.1"},
+ {"network": "1.0.1.17/32", "next_hop": "10.0.3.1"},
+ {"network": "1.0.2.17/32", "next_hop": "10.0.3.1"},
+ {"network": "1.0.3.17/32", "next_hop": "10.0.5.1"}
+ ]
+ },
+ "r5": {"links": {"r3": {"ipv4": "auto"}}}
+ }
+}
diff --git a/tests/topotests/multicast-pim-static-rp-topo1/test_multicast_pim_static_rp.py b/tests/topotests/multicast-pim-static-rp-topo1/test_multicast_pim_static_rp.py
new file mode 100755
index 0000000000..8dfdd50527
--- /dev/null
+++ b/tests/topotests/multicast-pim-static-rp-topo1/test_multicast_pim_static_rp.py
@@ -0,0 +1,3810 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test Multicast basic functionality:
+
+Topology:
+
+ _______r2_____
+ | |
+ iperf | | iperf
+ r0-----r1-------------r3-----r5
+ | |
+ |_____________|
+ r4
+
+Test steps
+- Create topology (setup module)
+- Bring up topology
+
+TC_1 : Verify upstream interfaces(IIF) and join state are updated properly
+ after adding and deleting the static RP
+TC_2 : Verify IIF and OIL in "show ip pim state" updated properly after
+ adding and deleting the static RP
+TC_3: (*, G) Mroute entry are cleared when static RP gets deleted
+TC_4: Verify (*,G) prune is send towards the RP after deleting the static RP
+TC_5: Verify OIF entry for RP is cleared when RP becomes unreachable
+TC_6: Verify IIF and OIL in "show ip pim state" updated properly when RP
+ becomes unreachable
+TC_7 : Verify upstream interfaces(IIF) and join state are updated properly
+ after adding and deleting the static RP
+TC_8: Verify (*,G) prune is send towards the RP when RP becomes unreachable
+TC_9 : Verify RP configured after IGMP join received, PIM join towards RP is
+ sent immediately
+TC_10 : Verify RP becomes reachable after IGMP join received, PIM join
+ towards RP is sent immediately
+TC_11 : Verify PIM join send towards the higher preferred RP
+TC_12 : Verify PIM prune send towards the lower preferred RP
+TC_13 : Verify RPF interface is updated in mroute (kernel) when higher
+ preferred overlapping RP configured
+TC_14 : Verify IIF and OIL in "show ip pim state" updated properly when higher
+ preferred overlapping RP configured
+TC_15 : Verify upstream interfaces(IIF) and join state are updated when higher
+ preferred overlapping RP is configured
+TC_16 : Verify join is send to lower preferred RP, when higher preferred RP
+ gets deleted
+TC_17 : Verify prune is send to higher preferred RP when higher preferred RP
+ gets deleted
+TC_18 : Verify RPF interface updated in mroute when higher preferred RP gets
+ deleted
+TC_19 : Verify IIF and OIL in "show ip pim state" updated when higher
+ preferred overlapping RP is deleted
+TC_20 : Verify PIM upstream IIF updated when higher preferred overlapping RP
+ deleted
+TC_21_1 : Verify OIF and RFP for (*,G) and (S,G) when static RP configure in
+ LHR router
+TC_21_2 : Verify OIF and RFP for (*,G) and (S,G) when static RP configure in
+ LHR router
+TC_22_1 : Verify OIF and RPF for (*,G) and (S,G) when static RP configure in
+ FHR router
+TC_22_2 : Verify OIF and RPF for (*,G) and (S,G) when static RP configure in
+ FHR router
+TC_23 : Verify (*,G) and (S,G) populated correctly when RPT and SPT path are
+ different
+TC_24 : Verify (*,G) and (S,G) populated correctly when SPT and RPT share the
+ same path
+TC_25 : Verify (*,G) and (S,G) populated correctly after clearing the PIM ,
+ IGMP and mroutes joins
+TC_26 : Restart the PIMd process and verify PIM joins , and mroutes entries
+TC_27 : Configure multiple groups (10 grps) with same RP address
+TC_28 : Configure multiple groups (10 grps) with different RP address
+TC_29 : Verify IIF and OIL in updated in mroute when upstream interface
+ configure as RP
+TC_30 : Verify IIF and OIL change to other path after shut the primary path
+TC_31 : Verify RP info and (*,G) mroute after deleting the RP and shut / no
+ shut the RPF interface.
+TC_32 : Verify RP info and (*,G) mroute after deleting the RP and shut / no
+    shut the RPF interface.
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+from time import sleep
+import datetime
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ step,
+ iperfSendIGMPJoin,
+ iperfSendTraffic,
+ addKernelRoute,
+ shutdown_bringup_interface,
+ kill_router_daemons,
+ start_router_daemons,
+ create_static_routes,
+ kill_iperf,
+ topo_daemons,
+)
+from lib.pim import (
+ create_pim_config,
+ verify_igmp_groups,
+ verify_upstream_iif,
+ verify_join_state_and_timer,
+ verify_ip_mroutes,
+ verify_pim_neighbors,
+ verify_pim_interface_traffic,
+ verify_pim_rp_info,
+ verify_pim_state,
+ clear_ip_pim_interface_traffic,
+ clear_ip_igmp_interfaces,
+ clear_ip_pim_interfaces,
+ clear_ip_mroute,
+ clear_ip_mroute_verify,
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/multicast_pim_static_rp.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ logger.info("Could not read file:", jsonFile)
+
+# Global variables
+# Multicast group ranges/addresses and the traffic source shared by all
+# testcases in this suite.
+GROUP_RANGE_ALL = "224.0.0.0/4"
+GROUP_RANGE = "225.1.1.1/32"
+GROUP_RANGE_LIST_1 = [
+    "225.1.1.1/32",
+    "225.1.1.2/32",
+    "225.1.1.3/32",
+    "225.1.1.4/32",
+    "225.1.1.5/32",
+]
+GROUP_RANGE_LIST_2 = [
+    "225.1.1.6/32",
+    "225.1.1.7/32",
+    "225.1.1.8/32",
+    "225.1.1.9/32",
+    "225.1.1.10/32",
+]
+GROUP_ADDRESS = "225.1.1.1"
+GROUP_ADDRESS_LIST_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"]
+GROUP_ADDRESS_LIST_2 = [
+    "225.1.1.6",
+    "225.1.1.7",
+    "225.1.1.8",
+    "225.1.1.9",
+    "225.1.1.10",
+]
+# "*" denotes a (*,G) wildcard source in mroute/upstream checks.
+STAR = "*"
+SOURCE_ADDRESS = "10.0.6.2"
+SOURCE = "Static"
+
+
+class CreateTopo(Topo):
+    """
+    Test BasicTopo - topology 1
+
+    * `Topo`: Topology object
+    """
+
+    def build(self, *_args, **_opts):
+        """Build function: construct the topology from the JSON definition."""
+        tgen = get_topogen(self)
+
+        # Building topology from json file loaded at module import time.
+        build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment
+
+    * `mod`: module name
+    """
+    global topo
+
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: {}".format(testsuite_run_time))
+    logger.info("=" * 40)
+
+    topology = """
+
+          _______r2_____
+         |             |
+   iperf |             |   iperf
+     r0-----r1-------------r3-----r5
+             |             |
+             |_____________|
+                    r4
+
+    """
+    logger.info("Master Topology: \n {}".format(topology))
+
+    logger.info("Running setup_module to create topology")
+
+    # This function initiates the topology build with Topogen...
+    tgen = Topogen(CreateTopo, mod.__name__)
+
+    # ... and here it calls Mininet initialization functions.
+
+    # get list of daemons needs to be started for this suite.
+    daemons = topo_daemons(tgen, topo)
+
+    # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+    start_topology(tgen, daemons)
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    # Creating configuration from JSON
+    build_config_from_json(tgen, topo)
+
+    # Verify PIM neighbors are up before any testcase runs.
+    result = verify_pim_neighbors(tgen, topo)
+    assert result is True, "setup_module :Failed \n Error:" " {}".format(result)
+
+    logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop toplogy and Remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+def config_to_send_igmp_join_and_traffic(tgen, tc_name):
+    """
+    API to do pre-configuration to send IGMP join and multicast
+    traffic
+
+    parameters:
+    -----------
+    * `tgen`: topogen object
+    * `tc_name`: caller test case name
+    """
+
+    step("r0: Add route to kernal")
+    result = addKernelRoute(tgen, "r0", "r0-r1-eth0", GROUP_RANGE_ALL)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r5: Add route to kernal")
+    result = addKernelRoute(tgen, "r5", "r5-r3-eth0", GROUP_RANGE_ALL)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    # Install static kernel routes so RPF lookups towards the traffic
+    # source subnet (10.0.6.0/24) resolve on each transit router.
+    rnode = tgen.routers()["r1"]
+    rnode.run("ip route add 10.0.6.0/24 via 10.0.2.2")
+    rnode = tgen.routers()["r2"]
+    rnode.run("ip route add 10.0.6.0/24 via 10.0.4.2")
+    rnode = tgen.routers()["r4"]
+    rnode.run("ip route add 10.0.6.0/24 via 10.0.5.1")
+
+    # Loose reverse-path filtering (rp_filter=2) on every router so the
+    # kernel does not drop multicast traffic arriving on non-best paths.
+    router_list = tgen.routers()
+    for router in router_list.keys():
+        rnode = router_list[router]
+        rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter")
+
+    return True
+
+
+def verify_mroute_repopulated(uptime_before, uptime_after):
+ """
+ API to compare uptime for mroutes
+
+ Parameters
+ ----------
+ * `uptime_before` : Uptime dictionary for any particular instance
+ * `uptime_after` : Uptime dictionary for any particular instance
+ """
+
+ for group in uptime_before.keys():
+ for source in uptime_before[group].keys():
+ if set(uptime_before[group]) != set(uptime_after[group]):
+ errormsg = (
+ "mroute (%s, %s) has not come"
+ " up after mroute clear [FAILED!!]" % (source, group)
+ )
+ return errormsg
+
+ d1 = datetime.datetime.strptime(uptime_before[group][source], "%H:%M:%S")
+ d2 = datetime.datetime.strptime(uptime_after[group][source], "%H:%M:%S")
+ if d2 >= d1:
+ errormsg = "mroute (%s, %s) is not " "repopulated [FAILED!!]" % (
+ source,
+ group,
+ )
+ return errormsg
+
+ logger.info("mroute (%s, %s) is " "repopulated [PASSED!!]", source, group)
+
+ return True
+
+
+def verify_state_incremented(state_before, state_after):
+ """
+ API to compare interface traffic state incrementing
+
+ Parameters
+ ----------
+ * `state_before` : State dictionary for any particular instance
+ * `state_after` : State dictionary for any particular instance
+ """
+
+ for router, state_data in state_before.items():
+ for state, value in state_data.items():
+ if state_before[router][state] >= state_after[router][state]:
+ errormsg = (
+ "[DUT: %s]: state %s value has not"
+ " incremented, Initial value: %s, "
+ "Current value: %s [FAILED!!]"
+ % (
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT: %s]: State %s value is "
+ "incremented, Initial value: %s, Current value: %s"
+ " [PASSED!!]",
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+
+ return True
+
+
+def test_add_delete_static_RP_p0(request):
+ """
+ TC_1_P0 : Verify upstream interfaces(IIF) and join state are updated
+ properly after adding and deleting the static RP
+ TC_2_P0 : Verify IIF and OIL in "show ip pim state" updated properly
+ after adding and deleting the static RP
+ TC_3_P0: (*, G) Mroute entry are cleared when static RP gets deleted
+ TC_4_P0: Verify (*,G) prune is send towards the RP after deleting the
+ static RP
+
+ Topology used:
+ r0------r1-----r2
+ iperf DUT RP
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("pre-configuration to send IGMP join and multicast traffic")
+ result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
+ assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
+
+ step("Enable IGMP on r1 interface and send IGMP " "join (225.1.1.1) to r1")
+ step("Configure r2 loopback interface as RP")
+ step("Enable PIM between r1 and r3")
+
+ step("r1: Verify show ip igmp group without any IGMP join")
+ dut = "r1"
+ interface = "r1-r0-eth0"
+ result = verify_igmp_groups(tgen, dut, interface, GROUP_ADDRESS, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r1: Verify show ip pim interface traffic without any IGMP join")
+ state_dict = {"r1": {"r1-r2-eth1": ["pruneTx"]}}
+
+ state_before = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_before, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ step("r0 : Send IGMP join")
+ result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify IGMP groups")
+ oif = "r1-r0-eth0"
+ result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify RP info")
+ dut = "r1"
+ iif = "r1-r2-eth1"
+ rp_address = "1.0.2.17"
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ step("r1: Verify ip mroutes")
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify ip pim join")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ step("r1: Delete RP configuration")
+
+ # Delete RP configuration
+ input_dict = {
+ "r1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify RP info")
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r1: Verify upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r1: Verify upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r1: Verify PIM state")
+ result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r1: Verify ip mroutes")
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r1: Verify show ip pim interface traffic without any IGMP join")
+ state_after = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_after, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ result = verify_state_incremented(state_before, state_after)
+ assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
+
+ # Uncomment next line for debugging
+ # tgen.mininet_cli()
+
+ write_test_footer(tc_name)
+
+
+def test_SPT_RPT_path_same_p1(request):
+    """
+    TC_24_P1 : Verify (*,G) and (S,G) populated correctly when SPT and RPT
+    share the same path
+
+    Topology used:
+        ________r2_____
+        |             |
+  iperf |             | iperf
+    r0-----r1         r3-----r5
+
+    r1 : LHR
+    r2 : RP
+    r3 : FHR
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Creating configuration from JSON")
+    reset_config_on_routers(tgen)
+    kill_iperf(tgen)
+    clear_ip_mroute(tgen)
+    clear_ip_pim_interface_traffic(tgen, topo)
+
+    step("pre-configuration to send IGMP join and multicast traffic")
+    result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
+    assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
+
+    # Shut the alternate links so the shared-tree (RPT) and shortest-path
+    # tree (SPT) converge on the single remaining r1<->r2<->r3 path.
+    dut = "r1"
+    intf = "r1-r3-eth2"
+    shutdown_bringup_interface(tgen, dut, intf, False)
+    intf = "r1-r4-eth3"
+    shutdown_bringup_interface(tgen, dut, intf, False)
+
+    dut = "r3"
+    intf = "r3-r1-eth0"
+    shutdown_bringup_interface(tgen, dut, intf, False)
+    intf = "r3-r4-eth2"
+    shutdown_bringup_interface(tgen, dut, intf, False)
+
+    step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to R1")
+    step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
+    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
+    step("Send multicast traffic from R3")
+
+    step("r2: Verify RP info")
+    dut = "r2"
+    rp_address = "1.0.2.17"
+    iif = "lo"
+    result = verify_pim_rp_info(
+        tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+    )
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r0: Send IGMP join")
+    result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify IGMP groups")
+    dut = "r1"
+    oif = "r1-r0-eth0"
+    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r5: Send multicast traffic for group 225.1.1.1")
+    result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream IIF interface")
+    dut = "r1"
+    iif = "r1-r2-eth1"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (*, G) ip mroutes")
+    result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream IIF interface")
+    iif = "r1-r2-eth1"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r1: Verify (S, G) ip mroutes")
+    result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) upstream IIF interface")
+    dut = "r2"
+    iif = "lo"
+    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (*, G) ip mroutes")
+    oif = "r2-r1-eth0"
+    result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (S, G) upstream IIF interface")
+    iif = "r2-r3-eth1"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (S, G) upstream join state and join timer")
+    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r2: Verify (S, G) ip mroutes")
+    result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream IIF interface")
+    dut = "r3"
+    iif = "r3-r5-eth3"
+    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("r3: Verify (S, G) upstream join state and join timer")
+    # On the FHR the (S,G) join timer is expected to be absent (expected=False).
+    result = verify_join_state_and_timer(
+        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+    )
+    assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+        tc_name, result
+    )
+
+    step("r3: Verify (S, G) ip mroutes")
+    oif = "r3-r2-eth1"
+    result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    # Uncomment next line for debugging
+    # tgen.mininet_cli()
+
+    write_test_footer(tc_name)
+
+
+def test_not_reachable_static_RP_p0(request):
+ """
+ TC_5_P0: Verify OIF entry for RP is cleared when RP becomes unreachable
+ TC_6_P0: Verify IIF and OIL in "show ip pim state" updated properly when
+ RP becomes unreachable
+ TC_7_P0 : Verify upstream interfaces(IIF) and join state are updated
+ properly after adding and deleting the static RP
+ TC_8_P0: Verify (*,G) prune is send towards the RP when RP becomes
+ unreachable
+
+ Topology used:
+ r0------r1-----r2
+ iperf DUT RP
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ reset_config_on_routers(tgen)
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ step("pre-configuration to send IGMP join and multicast traffic")
+ result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
+ assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
+
+ dut = "r1"
+ intf = "r1-r3-eth2"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ dut = "r1"
+ intf = "r1-r4-eth3"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step(
+ "r1: (*,G) prune is not sent towards the RP interface, verify using"
+ "show ip pim interface traffic"
+ )
+ state_dict = {"r1": {"r1-r2-eth1": ["pruneTx"]}}
+ state_before = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_before, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ step("Enable IGMP on r1 interface and send IGMP " "join (225.1.1.1) to r1")
+ step("Configure r2 loopback interface as RP")
+ step("Enable PIM between r1 and r2")
+
+ step("r0 : Send IGMP join")
+ result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify rp info")
+ dut = "r1"
+ iif = "r1-r2-eth1"
+ rp_address = "1.0.2.17"
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify IGMP groups")
+ oif = "r1-r0-eth0"
+ result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify PIM state")
+ result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 :Verify ip mroutes")
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Make RP un-reachable")
+ dut = "r1"
+ input_dict = {
+ dut: {
+ "static_routes": [
+ {"network": "1.0.2.17/32", "next_hop": "10.0.1.2", "delete": True}
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Check RP detail using show ip pim rp-info OIF should be unknown")
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_ALL, "Unknown", rp_address, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "r1 : OIL should be same and IIF should be cleared on R1 verify"
+ "using show ip pim state"
+ )
+ result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r1: upstream IIF should be unknown , verify using show ip pim" "upstream")
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "r1: join state should not be joined and join timer should stop,"
+ "verify using show ip pim upstream"
+ )
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "r1: (*,G) prune is sent towards the RP interface, verify using"
+ "show ip pim interface traffic"
+ )
+ state_after = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_after, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ result = verify_state_incremented(state_before, state_after)
+ assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
+
+ step("r1: (*, G) cleared from mroute table using show ip mroute")
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+ logger.info("Expected behavior: {}".format(result))
+
+ # Uncomment next line for debugging
+ # tgen.mininet_cli()
+
+ write_test_footer(tc_name)
+
+
+def test_add_RP_after_join_received_p1(request):
+ """
+ TC_9_P1 : Verify RP configured after IGMP join received, PIM join towards
+ RP is sent immediately
+
+ Topology used:
+ r0------r1-----r2
+ iperf DUT RP
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ reset_config_on_routers(tgen)
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ step("pre-configuration to send IGMP join and multicast traffic")
+ result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
+ assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
+
+ step("Enable IGMP on R1 interface")
+ step("Configure r2 loopback interface as RP")
+ step("Enable PIM between r1 and r2")
+ step("Delete RP configuration from r1")
+
+ step("r1: Delete RP configuration")
+ input_dict = {
+ "r1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify rp-info")
+ dut = "r1"
+ rp_address = "1.0.2.17"
+ iif = "r1-r2-eth1"
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("joinTx value before join sent")
+ state_dict = {"r1": {"r1-r2-eth1": ["joinTx"]}}
+ state_before = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_before, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ step("r0 : Send IGMP join (225.1.1.1) to r1, when rp is not configured" "in r1")
+ result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: IGMP group is received on R1 verify using show ip igmp groups")
+ oif = "r1-r0-eth0"
+ result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r1: Verify upstream join state and join timer")
+
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r1: Verify PIM state")
+ result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r1: Verify ip mroutes")
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r1: Configure static RP")
+ input_dict = {
+ "r1": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify rp-info")
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify PIM state")
+ result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify ip mroutes")
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ logger.info("Expected behavior: {}".format(result))
+
+ state_after = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_after, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ result = verify_state_incremented(state_before, state_after)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ # Uncomment next line for debugging
+ # tgen.mininet_cli()
+
+ write_test_footer(tc_name)
+
+
+def test_reachable_static_RP_after_join_p0(request):
+ """
+ TC_10_P0 : Verify RP becomes reachable after IGMP join received, PIM join
+ towards RP is sent immediately
+
+ Topology used:
+ r0------r1-----r3
+ iperf DUT RP
+ """
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ reset_config_on_routers(tgen)
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ step("pre-configuration to send IGMP join and multicast traffic")
+ result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
+ assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
+
+ step("Enable IGMP on r1 interface and send IGMP " "join (225.1.1.1) to r1")
+ step("Configure r2 loopback interface as RP")
+ step("Enable PIM between r1 and r2")
+
+ step("r1 : Verify pim interface traffic")
+ state_dict = {"r1": {"r1-r2-eth1": ["joinTx"]}}
+ state_before = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_before, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ step("r1: Make RP un-reachable")
+ dut = "r1"
+ intf = "r1-r2-eth1"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+ intf = "r1-r3-eth2"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+ intf = "r1-r4-eth3"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step("r1: Verify rp-info")
+ rp_address = "1.0.2.17"
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_ADDRESS, "Unknown", rp_address, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Send IGMP join for 225.1.1.1")
+ result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify IGMP groups")
+ oif = "r1-r0-eth0"
+ result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify upstream IIF interface")
+ iif = "r1-r2-eth1"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r1 : Verify upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r1 : Verify PIM state")
+ result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r1 : Verify ip mroutes")
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r1: Make RP reachable")
+ intf = "r1-r2-eth1"
+ shutdown_bringup_interface(tgen, dut, intf, True)
+ intf = "r1-r3-eth2"
+ shutdown_bringup_interface(tgen, dut, intf, True)
+ intf = "r1-r4-eth3"
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ step("r1 : Verify rp-info")
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify PIM state")
+ result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify ip mroutes")
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ logger.info("Expected behavior: {}".format(result))
+
+ step("r1 : Verify pim interface traffic")
+ state_after = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_after, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ result = verify_state_incremented(state_before, state_after)
+ assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
+
+ # Uncomment next line for debugging
+ # tgen.mininet_cli()
+
+ write_test_footer(tc_name)
+
+
+def test_send_join_on_higher_preffered_rp_p1(request):
+ """
+ TC_11_P1 : Verify PIM join send towards the higher preferred RP
+ TC_12_P1 : Verify PIM prune send towards the lower preferred RP
+ TC_13_P1 : Verify RPF interface is updated in mroute (kernel) when higher
+ preferred overlapping RP configured
+ TC_14_P1 : Verify IIF and OIL in "show ip pim state" updated properly when
+ higher preferred overlapping RP configured
+ TC_15_P1 : Verify upstream interfaces(IIF) and join state are updated when
+ higher preferred overlapping RP is configured
+ TC_16_P1 : Verify join is send to lower preferred RP, when higher
+ preferred RP gets deleted
+ TC_17_P1 : Verify prune is send to higher preferred RP when higher
+ preferred RP gets deleted
+ TC_18_P1 : Verify RPF interface updated in mroute when higher preferred RP
+ gets deleted
+ TC_19_P1 : Verify IIF and OIL in "show ip pim state" updated when higher
+ preferred overlapping RP is deleted
+ TC_20_P1 : Verfiy PIM upstream IIF updated when higher preferred
+ overlapping RP deleted
+
+ Topology used:
+ _______r2
+ |
+ iperf |
+ r0-----r1
+ |
+ |_______r4
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ reset_config_on_routers(tgen)
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ step("pre-configuration to send IGMP join and multicast traffic")
+ result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
+ assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
+
+ step("Enable IGMP on r1 interface")
+ step("Configure RP on r2 (loopback interface) for the group range " "224.0.0.0/4")
+ step("Configure RP on r4 (loopback interface) for the group range " "225.1.1.1/32")
+
+ step("r3 : Make all interface not reachable")
+ dut = "r3"
+ intf = "r3-r1-eth0"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+ intf = "r3-r2-eth1"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+ intf = "r3-r4-eth2"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ dut = "r2"
+ intf = "r2-r3-eth1"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ dut = "r4"
+ intf = "r4-r3-eth1"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ dut = "r1"
+ intf = "r1-r3-eth1"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step("r1 : Verify joinTx count before sending join")
+ state_dict = {"r1": {"r1-r4-eth3": ["joinTx"], "r1-r2-eth1": ["pruneTx"]}}
+
+ state_before = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_before, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ step("r0 : Send IGMP join for 225.1.1.1")
+ result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify IGMP groups")
+ dut = "r1"
+ oif = "r1-r0-eth0"
+ result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure static RP for group 225.1.1.1/32")
+ input_dict = {
+ "r4": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.4.17", "group_addr_range": ["225.1.1.1/32"],}]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify RP info for group 224.0.0.0/4")
+ rp_address_1 = "1.0.2.17"
+ iif = "r1-r2-eth1"
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address_1, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify RP info for group 225.1.1.1")
+ rp_address_2 = "1.0.4.17"
+ iif = "r1-r4-eth3"
+ result = verify_pim_rp_info(tgen, topo, dut, GROUP_RANGE, iif, rp_address_2, SOURCE)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify join is sent to higher preferred RP")
+ step("r1 : Verify prune is sent to lower preferred RP")
+ state_after = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_after, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ result = verify_state_incremented(state_before, state_after)
+ assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
+
+ step("r1 : Verify ip mroutes")
+ iif = "r1-r4-eth3"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify PIM state")
+ result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ step("r1 : Verify joinTx, pruneTx count before RP gets deleted")
+ state_dict = {"r1": {"r1-r2-eth1": ["joinTx"], "r1-r4-eth3": ["pruneTx"]}}
+
+ state_before = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_before, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ step("r1 : Delete RP configuration for 225.1.1.1")
+ input_dict = {
+ "r1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.4.17",
+ "group_addr_range": ["225.1.1.1/32"],
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify rp-info for group 224.0.0.0/4")
+ iif = "r1-r2-eth1"
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address_1, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1 : Verify rp-info for group 225.1.1.1")
+ iif = "r1-r4-eth3"
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE, oif, rp_address_2, SOURCE, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "r1 : Verify RPF interface updated in mroute when higher preferred"
+ "RP gets deleted"
+ )
+ iif = "r1-r2-eth1"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ logger.info("Expected behavior: {}".format(result))
+
+ step(
+ "r1 : Verify IIF and OIL in show ip pim state updated when higher"
+ "preferred overlapping RP is deleted"
+ )
+ result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "r1 : Verfiy upstream IIF updated when higher preferred overlapping"
+ "RP deleted"
+ )
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "r1 : Verify upstream join state and join timer updated when higher"
+ "preferred overlapping RP deleted"
+ )
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "r1 : Verify join is sent to lower preferred RP, when higher"
+ "preferred RP gets deleted"
+ )
+ step(
+ "r1 : Verify prune is sent to higher preferred RP when higher"
+ " preferred RP gets deleted"
+ )
+ state_after = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_after, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n "
+ "Error: {}".format(tc_name, result)
+
+ result = verify_state_incremented(state_before, state_after)
+ assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
+
+ # Uncomment next line for debugging
+ # tgen.mininet_cli()
+
+ write_test_footer(tc_name)
+
+
+def test_RP_configured_as_LHR_1_p1(request):
+ """
+ TC_21_1_P1: Verify OIF and RPF for (*,G) and (S,G) when static RP configure
+ in LHR router
+
+ Topology used:
+ ________r2_____
+ | |
+ iperf | | iperf
+ r0-----r1-------------r3-----r5
+
+ r1 : LHR/RP
+ r3 : FHR
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ reset_config_on_routers(tgen)
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ step("pre-configuration to send IGMP join and multicast traffic")
+ result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
+ assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
+
+ step("Enable IGMP on r1 interface")
+ step("Configure RP on r1 (loopback interface) for the group range" " 224.0.0.0/4")
+ step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
+ step("Send the IGMP join from r0")
+ step("Send multicast traffic from r5")
+
+ # Remove the default RP (r2's loopback, 1.0.2.17) from every router so
+ # r1's loopback can be configured as the RP in the next block.
+ step("r1 , r2, r3, r4: Delete existing RP configuration" "configure r1(LHR) as RP")
+ input_dict = {
+ "r1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ "r3": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ "r4": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Configure r1(LHR) as RP")
+ input_dict = {
+ "r1": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ }
+ },
+ "r2": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ }
+ },
+ "r3": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ }
+ },
+ "r4": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ # NOTE(review): r1's loopback (which carries the new RP address 1.0.1.17)
+ # is flapped, presumably to force PIM to re-resolve the RP; the 5-second
+ # settle time looks empirical — confirm it is sufficient on slow runners.
+ shutdown_bringup_interface(tgen, "r1", "lo", False)
+ sleep(5)
+ shutdown_bringup_interface(tgen, "r1", "lo", True)
+ sleep(5)
+
+ step("r1: Verify RP info")
+ dut = "r1"
+ rp_address = "1.0.1.17"
+ iif = "lo"
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r0: Send IGMP join")
+ result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify IGMP groups")
+ oif = "r1-r0-eth0"
+ result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r5: Send multicast traffic for group 225.1.1.1")
+ result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ iif = "r1-r3-eth2"
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = "r3-r5-eth3"
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ # NOTE(review): negative check — join state is presumably not expected on
+ # the FHR's source-facing upstream; confirm the intended semantics.
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = "r3-r1-eth0"
+ result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ # Uncomment next line for debugging
+ # tgen.mininet_cli()
+
+ write_test_footer(tc_name)
+
+
+def test_RP_configured_as_LHR_2_p1(request):
+ """
+ TC_21_2_P1: Verify OIF and RPF for (*,G) and (S,G) when static RP configure
+ in LHR router
+
+ Topology used:
+ ________r2_____
+ | |
+ iperf | | iperf
+ r0-----r1-------------r3-----r5
+
+ r1 : LHR/RP
+ r3 : FHR
+
+ """
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ reset_config_on_routers(tgen)
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ step("pre-configuration to send IGMP join and multicast traffic")
+ result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
+ assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
+
+ step("Enable IGMP on r1 interface")
+ step("Configure RP on r1 (loopback interface) for the group range" " 224.0.0.0/4")
+ step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
+ step("Send multicast traffic from r5")
+ step("Send the IGMP join from r0")
+
+ # Remove the default RP (r2's loopback, 1.0.2.17) from every router so
+ # r1's loopback can be configured as the RP in the next block. Unlike the
+ # _1_ variant of this test, traffic is started before the IGMP join.
+ step("r1, r2, r3, r4: Delete existing RP configuration," "configure r1(LHR) as RP")
+ input_dict = {
+ "r1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ "r3": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ "r4": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1, r2, r3, r4: Configure r1(LHR) as RP")
+ input_dict = {
+ "r1": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ }
+ },
+ "r2": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ }
+ },
+ "r3": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ }
+ },
+ "r4": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify RP info")
+ dut = "r1"
+ rp_address = "1.0.1.17"
+ iif = "lo"
+ # NOTE(review): sibling testcases pass GROUP_RANGE_ALL here; GROUP_ADDRESS
+ # (a single group) may be intentional — confirm against the helper's API.
+ result = verify_pim_rp_info(tgen, topo, dut, GROUP_ADDRESS, iif, rp_address, SOURCE)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r5: Send multicast traffic for group 225.1.1.1")
+ result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r0: Send IGMP join")
+ result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify IGMP groups")
+ oif = "r1-r0-eth0"
+ result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ iif = "r1-r3-eth2"
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = "r3-r5-eth3"
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ # NOTE(review): negative check — join state is presumably not expected on
+ # the FHR's source-facing upstream; confirm the intended semantics.
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = "r3-r1-eth0"
+ result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ # Uncomment next line for debugging
+ # tgen.mininet_cli()
+
+ write_test_footer(tc_name)
+
+
+def test_RP_configured_as_FHR_1_p1(request):
+ """
+ TC_22_1_P1: Verify OIF and RFP for (*,G) and (S,G) when static RP configure
+ in FHR router
+
+ Topology used:
+ ________r2_____
+ | |
+ iperf | | iperf
+ r0-----r1-------------r3-----r5
+
+ r1 : LHR
+ r3 : FHR/RP
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ reset_config_on_routers(tgen)
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ step("pre-configuration to send IGMP join and multicast traffic")
+ result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
+ assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
+
+ step("Enable IGMP on r1 interface")
+ step("Configure RP on r2 (loopback interface) for the group range" " 225.1.1.0/24")
+ step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
+ step("Send the IGMP join from r0")
+ step("Send multicast traffic from r5")
+
+ step("r1, r2, r3, r4: Delete existing RP configuration" "configure r3(FHR) as RP")
+ input_dict = {
+ "r1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ "r3": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ "r4": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ }
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1, r2, r3, r4: Configure r3(FHR) as RP")
+ input_dict = {
+ "r1": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ }
+ },
+ "r2": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ }
+ },
+ "r3": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ }
+ },
+ "r4": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify RP info")
+ dut = "r1"
+ rp_address = "1.0.3.17"
+ iif = "r1-r3-eth2"
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r0: Send IGMP join")
+ result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r0: Verify IGMP groups")
+ oif = "r1-r0-eth0"
+ result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r5: Send multicast traffic for group 225.1.1.1")
+ result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = "r3-r5-eth3"
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = "r3-r1-eth0"
+ result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ # Uncomment next line for debugging
+ # tgen.mininet_cli()
+
+ write_test_footer(tc_name)
+
+
def test_RP_configured_as_FHR_2_p2(request):
    """
    TC_22_2_P2: Verify OIF and RFP for (*,G) and (S,G) when static RP configure
    in FHR router

    Topology used:
                ________r2_____
                |              |
        iperf   |              |  iperf
          r0----r1-------------r3----r5

    r1 : LHR
    r3 : FHR/RP
    """
    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    step("Creating configuration from JSON")
    reset_config_on_routers(tgen)
    kill_iperf(tgen)
    clear_ip_mroute(tgen)
    clear_ip_pim_interface_traffic(tgen, topo)

    step("pre-configuration to send IGMP join and multicast traffic")
    result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step("Enable IGMP on r1 interface")
    step("Configure RP on r2 (loopback interface) for the group range" " 225.1.1.0/24")
    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
    step("Send multicast traffic from r5")
    step("Send the IGMP join from r0")

    step("r1, r2, r3, r4: Delete existing RP configuration," " configure r3(FHR) as RP")
    # All four routers carry an identical "delete RP 1.0.2.17" stanza; build
    # the per-router config once instead of repeating it four times.
    input_dict = {
        rtr: {
            "pim": {
                "rp": [
                    {
                        "rp_addr": "1.0.2.17",
                        "group_addr_range": GROUP_RANGE_ALL,
                        "delete": True,
                    }
                ]
            }
        }
        for rtr in ("r1", "r2", "r3", "r4")
    }

    result = create_pim_config(tgen, topo, input_dict)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1, r2, r3, r4: Configure r3(FHR) as RP")
    # Same dedup for the new RP (1.0.3.17, r3's loopback).
    input_dict = {
        rtr: {
            "pim": {
                "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL}]
            }
        }
        for rtr in ("r1", "r2", "r3", "r4")
    }

    result = create_pim_config(tgen, topo, input_dict)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify RP info")
    dut = "r1"
    rp_address = "1.0.3.17"
    iif = "r1-r3-eth2"
    result = verify_pim_rp_info(
        tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
    )
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # Traffic first, then the join: this ordering is what distinguishes
    # this variant (TC_22_2) from the join-first variant.
    step("r5: Send multicast traffic for group 225.1.1.1")
    result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r0: Send IGMP join")
    result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r0: Verify IGMP groups")
    oif = "r1-r0-eth0"
    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (*, G) upstream IIF interface")
    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (*, G) upstream join state and join timer")
    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (*, G) ip mroutes")
    result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (S, G) upstream IIF interface")
    iif = "r1-r3-eth2"
    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (S, G) upstream join state and join timer")
    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (S, G) ip mroutes")
    result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r3: Verify (S, G) upstream IIF interface")
    dut = "r3"
    iif = "r3-r5-eth3"
    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # On the FHR the (S,G) join state must NOT be present, hence the
    # negative check (expected=False and "is not True").
    step("r3: Verify (S, G) upstream join state and join timer")
    result = verify_join_state_and_timer(
        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
    )
    assert result is not True, "Testcase {} :Failed \n Error: {}".format(
        tc_name, result
    )

    step("r3: Verify (S, G) ip mroutes")
    oif = "r3-r1-eth0"
    result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # Uncomment next line for debugging
    # tgen.mininet_cli()

    write_test_footer(tc_name)
+
+
def test_SPT_RPT_path_different_p1(request):
    """
    TC_23_P1: Verify (*,G) and (S,G) populated correctly when RPT and SPT path
    are different

    Topology used:
                ________r2_____
                |              |
        iperf   |              |  iperf
          r0----r1-------------r3----r5

    r1: LHR
    r2: RP
    r3: FHR
    """

    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    step("Creating configuration from JSON")
    reset_config_on_routers(tgen)
    kill_iperf(tgen)
    clear_ip_mroute(tgen)
    clear_ip_pim_interface_traffic(tgen, topo)

    step("pre-configuration to send IGMP join and multicast traffic")
    result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to r1")
    step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
    step("Send multicast traffic from r3")

    step("r2: Verify RP info")
    dut = "r2"
    rp_address = "1.0.2.17"
    iif = "lo"
    result = verify_pim_rp_info(tgen, topo, dut, GROUP_ADDRESS, iif, rp_address, SOURCE)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r0: Send IGMP join")
    result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify IGMP groups")
    dut = "r1"
    oif = "r1-r0-eth0"
    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r5: Send multicast traffic for group 225.1.1.1")
    result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # RPT: (*,G) towards the RP via r1-r2; SPT: (S,G) direct via r1-r3.
    step("r1: Verify (*, G) upstream IIF interface")
    iif = "r1-r2-eth1"
    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (*, G) upstream join state and join timer")
    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (*, G) ip mroutes")
    result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (S, G) upstream IIF interface")
    iif = "r1-r3-eth2"
    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (S, G) upstream join state and join timer")
    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (S, G) ip mroutes")
    result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r2: Verify (*, G) upstream IIF interface")
    dut = "r2"
    iif = "lo"
    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r2: Verify (*, G) upstream join state and join timer")
    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r2: Verify (*, G) ip mroutes")
    oif = "r2-r1-eth0"
    result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r3: Verify (S, G) upstream IIF interface")
    dut = "r3"
    iif = "r3-r5-eth3"
    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # FHR must not hold (S,G) join state -> negative check.
    step("r3: Verify (S, G) upstream join state and join timer")
    result = verify_join_state_and_timer(
        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
    )
    assert result is not True, "Testcase {} :Failed \n Error: {}".format(
        tc_name, result
    )

    step("r3: Verify (S, G) ip mroutes")
    oif = "r3-r1-eth0"
    result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # The RP sits off the SPT, so its (S,G) entry has an empty OIL and
    # stays in NotJoined state.
    step("r2: Verify (S, G) upstream IIF interface")
    dut = "r2"
    iif = "r2-r3-eth1"
    result = verify_upstream_iif(
        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, joinState="NotJoined"
    )
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r2: Verify (S, G) upstream join state and join timer")
    result = verify_join_state_and_timer(
        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
    )
    assert result is not True, "Testcase {} :Failed \n Error: {}".format(
        tc_name, result
    )

    step("r2: Verify (S, G) ip mroutes")
    oif = "none"
    result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # Uncomment next line for debugging
    # tgen.mininet_cli()

    write_test_footer(tc_name)
+
+
def test_clear_pim_configuration_p1(request):
    """
    TC_25_P1: Verify (*,G) and (S,G) populated correctly after clearing the
    PIM,IGMP and mroutes joins

    Topology used:
                ________r2_____
                |              |
        iperf   |              |  iperf
          r0----r1-------------r3----r5
                |              |
                |______________|
                       r4
    r1 : LHR
    r2 : RP
    r3 : FHR
    """

    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    step("Creating configuration from JSON")
    reset_config_on_routers(tgen)
    kill_iperf(tgen)
    clear_ip_mroute(tgen)
    clear_ip_pim_interface_traffic(tgen, topo)

    step("pre-configuration to send IGMP join and multicast traffic")
    result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step("Enable IGMP on r1 interface")
    step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
    step("Send the IGMP join from r0")
    step("Send multicast traffic from r5")

    step("r2: Verify RP info")
    dut = "r2"
    rp_address = "1.0.2.17"
    oif = "lo"
    result = verify_pim_rp_info(
        tgen, topo, dut, GROUP_RANGE_ALL, oif, rp_address, SOURCE
    )
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r0: Send IGMP join")
    result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify IGMP groups")
    dut = "r1"
    iif = "r1-r0-eth0"
    result = verify_igmp_groups(tgen, dut, iif, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r5: Send multicast traffic for group 225.1.1.1")
    result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (*, G) upstream IIF interface")
    dut = "r1"
    iif = "r1-r2-eth1"
    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (*, G) upstream join state and join timer")
    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (*, G) ip mroutes")
    oif = "r1-r0-eth0"
    result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # The clear_* helpers below issue the corresponding "clear ip ..."
    # commands and verify the timers/entries restart afterwards.
    step("r1: Verify IGMP groups timer restarted")
    result = clear_ip_igmp_interfaces(tgen, dut)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify PIM neighbor timer restarted")
    result = clear_ip_pim_interfaces(tgen, dut)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify PIM mroute timer restarted")
    result = clear_ip_mroute_verify(tgen, dut)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # Uncomment next line for debugging
    # tgen.mininet_cli()

    write_test_footer(tc_name)
+
+
def test_restart_pimd_process_p2(request):
    """
    TC_26_P2: Restart the PIMd process and verify PIM upstream and mroutes
    entries
    Topology used:
                ________r2_____
                |              |
        iperf   |              |  iperf
          r0----r1-------------r3----r5
                |              |
                |______________|
                       r4
    r1 : LHR
    r2 : RP
    r3 : FHR
    """

    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    step("Creating configuration from JSON")
    reset_config_on_routers(tgen)
    kill_iperf(tgen)
    clear_ip_mroute(tgen)
    clear_ip_pim_interface_traffic(tgen, topo)

    step("pre-configuration to send IGMP join and multicast traffic")
    result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to R1")
    # RP 1.0.2.17 is r2's loopback (verified on r2 below), so the step
    # text names r2, not r3.
    step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
    step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
    step("Send multicast traffic from R3")
    step("Restart the PIMd process")

    step("r2: Verify RP info")
    dut = "r2"
    rp_address = "1.0.2.17"
    oif = "lo"
    result = verify_pim_rp_info(
        tgen, topo, dut, GROUP_RANGE_ALL, oif, rp_address, SOURCE
    )
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r0: Send IGMP join")
    result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify IGMP groups")
    dut = "r1"
    oif = "r1-r0-eth0"
    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r5: Send multicast traffic for group 225.1.1.1")
    result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (*, G) upstream IIF interface")
    iif = "r1-r2-eth1"
    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (*, G) upstream join state and join timer")
    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (*, G) ip mroutes")
    result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (S, G) upstream IIF interface")
    iif = "r1-r3-eth2"
    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (S, G) upstream join state and join timer")
    result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (S, G) ip mroutes")
    result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r2: Verify (*, G) upstream IIF interface")
    dut = "r2"
    iif = "lo"
    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r2: Verify (*, G) upstream join state and join timer")
    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r2: Verify (*, G) ip mroutes")
    oif = "r2-r1-eth0"
    result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r3: Verify (S, G) upstream IIF interface")
    dut = "r3"
    iif = "r3-r5-eth3"
    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # FHR must not hold (S,G) join state -> negative check.
    step("r3: Verify (S, G) upstream join state and join timer")
    result = verify_join_state_and_timer(
        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
    )
    assert result is not True, "Testcase {} :Failed \n Error: {}".format(
        tc_name, result
    )

    step("r3: Verify (S, G) ip mroutes")
    oif = "r3-r1-eth0"
    result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # Capture mroute uptimes before the restart; let the entry age so the
    # pre-restart uptime is clearly larger than the re-learned one.
    dut = "r1"
    iif = "r1-r2-eth1"
    oil = "r1-r0-eth0"
    logger.info("waiting for 10 sec to make sure old mroute time is higher")
    sleep(10)
    uptime_before = verify_ip_mroutes(
        tgen, dut, STAR, GROUP_ADDRESS, iif, oil, return_uptime=True, wait=60
    )
    # Report the value actually being checked (uptime_before), not the
    # stale "result" from the previous verification.
    assert isinstance(uptime_before, dict), "Testcase {}: Failed Error: {}".format(
        tc_name, uptime_before
    )

    step("r1: Kill pimd process")
    kill_router_daemons(tgen, "r1", ["pimd"])

    step("r1 : Start pimd process")
    start_router_daemons(tgen, "r1", ["pimd"])

    logger.info("Waiting for 5sec to get PIMd restarted and mroute" " re-learned..")
    sleep(5)

    uptime_after = verify_ip_mroutes(
        tgen, dut, STAR, GROUP_ADDRESS, iif, oil, return_uptime=True, wait=10
    )
    assert isinstance(uptime_after, dict), "Testcase {}: Failed Error: {}".format(
        tc_name, uptime_after
    )

    result = verify_mroute_repopulated(uptime_before, uptime_after)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    write_test_footer(tc_name)
+
+
def test_multiple_groups_same_RP_address_p2(request):
    """
    TC_27_P2: Configure multiple groups (10 grps) with same RP address

    Topology used:
                ________r2_____
                |              |
        iperf   |              |  iperf
          r0----r1-------------r3----r5

    r1 : LHR
    r2 : RP
    r3 : FHR
    """

    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    step("Creating configuration from JSON")
    reset_config_on_routers(tgen)
    kill_iperf(tgen)
    clear_ip_mroute(tgen)
    clear_ip_pim_interface_traffic(tgen, topo)

    step("pre-configuration to send IGMP join and multicast traffic")
    result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to r1")
    step("Configure RP on r2 (loopback interface) for the group range" " 225.1.1.0/24")
    step("Enable the PIM on all the interfaces of r1-r2-r3")
    step("Send multicast traffic from r5 to all the groups")
    step("r1 : Remove the groups to RP mapping one by one")
    step("r1: Shut the upstream interfaces")
    step("r1: No shut the upstream interfaces")
    step("r1: Configure the RP again")
    step("r1: Shut the receiver interfaces")
    step("r1: No Shut the receiver interfaces")
    step("r2: Verify RP info")

    step("r2: verify rp-info")
    dut = "r2"
    rp_address = "1.0.2.17"
    oif = "lo"
    result = verify_pim_rp_info(
        tgen, topo, dut, GROUP_RANGE_ALL, oif, rp_address, SOURCE
    )
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # Ten groups in total, all mapped to the single RP 1.0.2.17.
    GROUP_ADDRESS_LIST = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2
    step("r0: Send IGMP join for 10 groups")
    result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS_LIST, join_interval=1)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify IGMP groups")
    dut = "r1"
    oif = "r1-r0-eth0"
    result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS_LIST)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r5: Send multicast traffic for all the groups")
    result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS_LIST, 32, 2500)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (*, G) upstream IIF interface")
    dut = "r1"
    iif = "r1-r2-eth1"
    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (*, G) upstream join state and join timer")
    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (*, G) ip mroutes")
    oif = "r1-r0-eth0"
    result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (S, G) upstream IIF interface")
    iif = "r1-r3-eth2"
    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (S, G) upstream join state and join timer")
    result = verify_join_state_and_timer(
        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST
    )
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (S, G) ip mroutes")
    result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r2: Verify (*, G) upstream IIF interface")
    dut = "r2"
    iif = "lo"
    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r2: Verify (*, G) upstream join state and join timer")
    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r2: Verify (*, G) ip mroutes")
    oif = "r2-r1-eth0"
    result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r3: Verify (S, G) upstream IIF interface")
    dut = "r3"
    iif = "r3-r5-eth3"
    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # FHR must not hold (S,G) join state -> negative check.
    step("r3: Verify (S, G) upstream join state and join timer")
    result = verify_join_state_and_timer(
        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, expected=False
    )
    assert result is not True, "Testcase {} :Failed \n Error: {}".format(
        tc_name, result
    )

    step("r3: Verify (S, G) ip mroutes")
    oif = "r3-r1-eth0"
    result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # The RP is off the source path; its (S,G) stays NotJoined with an
    # empty OIL.
    step("r2: Verify (S, G) upstream IIF interface")
    dut = "r2"
    iif = "r2-r3-eth1"
    result = verify_upstream_iif(
        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, joinState="NotJoined"
    )
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r2: Verify (S, G) upstream join state and join timer")
    result = verify_join_state_and_timer(
        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, expected=False
    )
    assert result is not True, "Testcase {} :Failed \n Error: {}".format(
        tc_name, result
    )

    step("r2: Verify (S, G) ip mroutes")
    oif = "none"
    result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # Churn phase: remove the RP, flap the upstream link, restore the RP,
    # flap the receiver link, then re-verify all state converges back.
    step("r1: Delete RP configuration")
    input_dict = {
        "r1": {
            "pim": {
                "rp": [
                    {
                        "rp_addr": "1.0.2.17",
                        "group_addr_range": GROUP_RANGE_ALL,
                        "delete": True,
                    }
                ]
            }
        }
    }

    result = create_pim_config(tgen, topo, input_dict)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Shut the interface r1-r2-eth1 from R1 to R2")
    dut = "r1"
    intf = "r1-r2-eth1"
    shutdown_bringup_interface(tgen, dut, intf, False)

    step("r1: No Shut the interface r1-r2-eth1 from R1 to R2")
    intf = "r1-r2-eth1"
    shutdown_bringup_interface(tgen, dut, intf, True)

    step("r1: Configure RP")
    input_dict = {
        "r1": {
            "pim": {
                "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": GROUP_RANGE_ALL}]
            }
        }
    }

    result = create_pim_config(tgen, topo, input_dict)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Shut the interface r1-r0-eth0 from R1 to R2")
    intf = "r1-r0-eth0"
    shutdown_bringup_interface(tgen, dut, intf, False)

    step("r1: No Shut the interface r1-r0-eth0 from R1 to R2")
    intf = "r1-r0-eth0"
    shutdown_bringup_interface(tgen, dut, intf, True)

    step("r1: Verify (*, G) upstream IIF interface")
    dut = "r1"
    iif = "r1-r2-eth1"
    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (*, G) upstream join state and join timer")
    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (*, G) ip mroutes")
    oif = "r1-r0-eth0"
    result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (S, G) upstream IIF interface")
    iif = "r1-r3-eth2"
    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (S, G) upstream join state and join timer")
    result = verify_join_state_and_timer(
        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST
    )
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r1: Verify (S, G) ip mroutes")
    result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r2: Verify (*, G) upstream IIF interface")
    dut = "r2"
    iif = "lo"
    result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r2: Verify (*, G) upstream join state and join timer")
    result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r2: Verify (*, G) ip mroutes")
    oif = "r2-r1-eth0"
    result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r2: Verify (S, G) upstream IIF interface")
    dut = "r2"
    iif = "r2-r3-eth1"
    result = verify_upstream_iif(
        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, joinState="NotJoined"
    )
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r2: Verify (S, G) upstream join state and join timer")
    result = verify_join_state_and_timer(
        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, expected=False
    )
    assert result is not True, "Testcase {} :Failed \n Error: {}".format(
        tc_name, result
    )

    step("r2: Verify (S, G) ip mroutes")
    oif = "none"
    result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r3: Verify (S, G) upstream IIF interface")
    dut = "r3"
    iif = "r3-r5-eth3"
    result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("r3: Verify (S, G) upstream join state and join timer")
    result = verify_join_state_and_timer(
        tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, expected=False
    )
    assert result is not True, "Testcase {} :Failed \n Error: {}".format(
        tc_name, result
    )

    step("r3: Verify (S, G) ip mroutes")
    oif = "r3-r1-eth0"
    result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, iif, oif)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    write_test_footer(tc_name)
+
+
+def test_multiple_groups_different_RP_address_p2(request):
+ """
+ TC_28_P2: Verify IIF and OIL are updated in the mroute when the upstream
+ interface is configured as RP
+
+ Topology used:
+ ________r2_____
+ | |
+ iperf | | iperf
+ r0-----r1-------------r3-----r5
+ | |
+ |_____________|
+ r4
+ r1 : LHR
+ r2 & r4 : RP
+ r3 : FHR
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ reset_config_on_routers(tgen)
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ step("pre-configuration to send IGMP join and multicast traffic")
+ result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Delete existing RP configuration")
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": GROUP_RANGE_LIST_1,}]
+ }
+ },
+ "r4": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.4.17", "group_addr_range": GROUP_RANGE_LIST_2,}]
+ }
+ },
+ }
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify RP info")
+ dut = "r2"
+ rp_address = "1.0.2.17"
+ oif = "lo"
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_LIST_1, oif, rp_address, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r4: Verify RP info")
+ dut = "r4"
+ rp_address = "1.0.4.17"
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_LIST_2, oif, rp_address, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ GROUP_ADDRESS_LIST = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2
+ step("r0: Send IGMP join")
+ result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS_LIST, join_interval=1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify IGMP groups")
+ dut = "r1"
+ oif = "r1-r0-eth0"
+ result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS_LIST)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r5: Send multicast traffic for group 225.1.1.1")
+ result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS_LIST, 32, 2500)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ dut = "r1"
+ iif = "r1-r2-eth1"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ iif = "r1-r3-eth2"
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ result = verify_ip_mroutes(
+ tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify (*, G) upstream IIF interface")
+ dut = "r2"
+ iif = "lo"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify (*, G) ip mroutes")
+ oif = "r2-r1-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify (S, G) upstream IIF interface")
+ iif = "r2-r3-eth1"
+ result = verify_upstream_iif(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, joinState="NotJoined"
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r2: Verify (S, G) ip mroutes")
+ oif = "none"
+ result = verify_ip_mroutes(
+ tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = "r3-r5-eth3"
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = "r3-r1-eth0"
+ result = verify_ip_mroutes(
+ tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ dut = "r1"
+ iif = "r1-r4-eth3"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ oif = "r1-r0-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ iif = "r1-r3-eth2"
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ result = verify_ip_mroutes(
+ tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r4: Verify (*, G) upstream IIF interface")
+ dut = "r4"
+ iif = "lo"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r4: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r4: Verify (*, G) ip mroutes")
+ oif = "r4-r1-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r4: Verify (S, G) upstream IIF interface")
+ iif = "r4-r3-eth1"
+ result = verify_upstream_iif(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, joinState="NotJoined"
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r4: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r4: Verify (S, G) ip mroutes")
+ oif = "none"
+ result = verify_ip_mroutes(
+ tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = "r3-r5-eth3"
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = "r3-r1-eth0"
+ result = verify_ip_mroutes(
+ tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Delete RP configuration")
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_LIST_1,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ "r4": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.4.17",
+ "group_addr_range": GROUP_RANGE_LIST_2,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ }
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1, r2, r3, r4: Re-configure RP")
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": GROUP_RANGE_LIST_1,}]
+ }
+ },
+ "r4": {
+ "pim": {
+ "rp": [{"rp_addr": "1.0.4.17", "group_addr_range": GROUP_RANGE_LIST_2,}]
+ }
+ },
+ }
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Shut the interface r1-r2-eth1 from R1 to R2")
+ dut = "r1"
+ intf = "r1-r2-eth1"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step("r1: No shut the interface r1-r2-eth1 from R1 to R2")
+ dut = "r1"
+ intf = "r1-r2-eth1"
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ step("r1: Shut the interface r1-r4-eth3 from R1 to R4")
+ dut = "r1"
+ intf = "r1-r4-eth3"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step("r1: No shut the interface r1-r4-eth3 from R1 to R4")
+ dut = "r1"
+ intf = "r1-r4-eth3"
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ step("r1: Shut the interface r1-r0-eth0 from R1 to R0")
+ dut = "r1"
+ intf = "r1-r0-eth0"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step("r1: No Shut the interface r1-r0-eth0 from R1 to R0")
+ dut = "r1"
+ intf = "r1-r0-eth0"
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ dut = "r1"
+ iif = "r1-r2-eth1"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ oif = "r1-r0-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ iif = "r1-r3-eth2"
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ result = verify_ip_mroutes(
+ tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify (*, G) upstream IIF interface")
+ dut = "r2"
+ iif = "lo"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify (*, G) ip mroutes")
+ oif = "r2-r1-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify (S, G) upstream IIF interface")
+ iif = "r2-r3-eth1"
+ result = verify_upstream_iif(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, joinState="NotJoined"
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r2: Verify (S, G) ip mroutes")
+ oif = "none"
+ result = verify_ip_mroutes(
+ tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = "r3-r5-eth3"
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = "r3-r1-eth0"
+ result = verify_ip_mroutes(
+ tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ dut = "r1"
+ iif = "r1-r4-eth3"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ oif = "r1-r0-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ iif = "r1-r3-eth2"
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ result = verify_ip_mroutes(
+ tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r4: Verify (*, G) upstream IIF interface")
+ dut = "r4"
+ iif = "lo"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r4: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r4: Verify (*, G) ip mroutes")
+ oif = "r4-r1-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r4: Verify (S, G) upstream IIF interface")
+ iif = "r4-r3-eth1"
+ result = verify_upstream_iif(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, joinState="NotJoined"
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r4: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r4: Verify (S, G) ip mroutes")
+ oif = "none"
+ result = verify_ip_mroutes(
+ tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = "r3-r5-eth3"
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = "r3-r1-eth0"
+ result = verify_ip_mroutes(
+ tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_shutdown_primary_path_p1(request):
+ """
+ TC_30_P1: Verify IIF and OIL change to other path after shut the primary
+ path
+
+ Topology used:
+ ________r2_____
+ | |
+ iperf | |
+ r0-----r1-------------r3
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ reset_config_on_routers(tgen)
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ step("pre-configuration to send IGMP join and multicast traffic")
+ result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ # Steps to execute
+ step("Enable IGMP on r1 interface")
+ step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
+ step("r1: Shut the link from r1 to r2")
+ step("r3: Shut the link from r1 to r3")
+ step("r1: No shut the link from r1 to r2")
+ step("r3: No shut the link from r1 to r3")
+
+ step("r1: Verify RP info")
+ dut = "r1"
+ rp_address = "1.0.2.17"
+ iif = "r1-r2-eth1"
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r0: Send IGMP join")
+ result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify IGMP groups")
+ oif = "r1-r0-eth0"
+ result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify (*, G) ip mroutes")
+ dut = "r2"
+ iif = "lo"
+ oif = "r2-r1-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Shut the interface r1-r2-eth1 from R1 to R2")
+ dut = "r1"
+ intf = "r1-r2-eth1"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step(
+ "Verify after shut the R1 to R2 link , verify join is reaching to RP"
+ "via other path"
+ )
+
+ logger.info("Waiting for 110 sec only if test run with crucible")
+
+ step("r1: Verify (*, G) ip mroutes")
+ dut = "r1"
+ iif = "r1-r3-eth2"
+ oif = "r1-r0-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify (*, G) ip mroutes")
+ dut = "r2"
+ iif = "lo"
+ oif = "r2-r3-eth1"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Verify (*, G) ip mroutes")
+ dut = "r3"
+ iif = "r3-r2-eth1"
+ oif = "r3-r1-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Shut the link from R1 to R3 from R3 node")
+ dut = "r3"
+ intf = "r3-r1-eth0"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step(
+ "Verify after shut of R1 to R3 link , verify (*,G) entries got"
+ "cleared from all the node R1, R2, R3"
+ )
+
+ step("r1: Verify (*, G) ip mroutes")
+ dut = "r1"
+ iif = "r1-r3-eth2"
+ oif = "r1-r0-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r2: Verify (*, G) ip mroutes")
+ dut = "r2"
+ iif = "lo"
+ oif = "r2-r3-eth1"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r3: Verify (*, G) ip mroutes")
+ dut = "r3"
+ iif = "r3-r2-eth1"
+ oif = "r3-r1-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r3: No shutdown the link from R1 to R3 from R3 node")
+ dut = "r3"
+ intf = "r3-r1-eth0"
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ step("r1: Verify (*, G) ip mroutes")
+ dut = "r1"
+ iif = "r1-r3-eth2"
+ oif = "r1-r0-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify (*, G) ip mroutes")
+ dut = "r2"
+ iif = "lo"
+ oif = "r2-r3-eth1"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r3: Verify (*, G) ip mroutes")
+ dut = "r3"
+ iif = "r3-r2-eth1"
+ oif = "r3-r1-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: No shutdown the link from R1 to R2 from R1 node")
+ dut = "r1"
+ intf = "r1-r2-eth1"
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ step("r1: Verify (*, G) ip mroutes")
+ dut = "r1"
+ iif = "r1-r2-eth1"
+ oif = "r1-r0-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify (*, G) ip mroutes")
+ dut = "r2"
+ iif = "lo"
+ oif = "r2-r1-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_delete_RP_shut_noshut_upstream_interface_p1(request):
+ """
+ TC_31_P1: Verify RP info and (*,G) mroute after deleting the RP and shut /
+ no shut the RPF interface.
+ Topology used:
+ ________r2_____
+ | |
+ iperf | |
+ r0-----r1-------------r3
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ reset_config_on_routers(tgen)
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ step("pre-configuration to send IGMP join and multicast traffic")
+ result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Enable IGMP on r1 interface")
+ step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
+ step("r1: Delete the RP config")
+ step("r1: Shut and no shut the upstream interface (R1-R2) connected link")
+ step("r1: Shut and no shut the OIL interface")
+
+ step("r1: Verify RP info")
+ dut = "r1"
+ rp_address = "1.0.2.17"
+ iif = "r1-r2-eth1"
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r0: Send IGMP join")
+ result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify IGMP groups")
+ dut = "r1"
+ oif = "r1-r0-eth0"
+ result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes created")
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify (*, G) ip mroutes created")
+ dut = "r2"
+ iif = "lo"
+ oif = "r2-r1-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Delete RP configuration")
+
+ # Delete RP configuration
+ input_dict = {
+ "r1": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Shut the interface r1-r2-eth1 from R1 to R2")
+ dut = "r1"
+ intf = "r1-r2-eth1"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step("r1: No shutdown the interface r1-r2-eth1 from R1 to R2")
+ dut = "r1"
+ intf = "r1-r2-eth1"
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ step("r1: Shutdown the OIL interface r1-r0-eth0 from R1 to R0 ")
+ dut = "r1"
+ intf = "r1-r0-eth0"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step("r1: No shutdown the OIL interface r1-r0-eth0 from R1 to R0")
+ dut = "r1"
+ intf = "r1-r0-eth0"
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ step("r1: Verify (*, G) ip mroutes cleared")
+ dut = "r1"
+ iif = "r1-r2-eth1"
+ oif = "r1-r0-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r2: Verify (*, G) ip mroutes cleared")
+ dut = "r2"
+ iif = "lo"
+ oif = "r2-r1-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_delete_RP_shut_noshut_RP_interface_p1(request):
+ """
+ TC_32_P1: Verify RP info and (*,G) mroute after deleting the RP and shut/
+ no shut the RPF interface
+
+ Topology used:
+ ________r2_____
+ | |
+ iperf | |
+ r0-----r1-------------r3
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ reset_config_on_routers(tgen)
+ kill_iperf(tgen)
+ clear_ip_mroute(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
+ step("pre-configuration to send IGMP join and multicast traffic")
+ result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Enable IGMP on r1 interface")
+ step("Configure RP on r2 (lo) for the group range" " 224.0.0.0/4")
+ step("r2: Delete the RP configuration")
+ step("r2: Shut the RP interface (lo)")
+ step("r1: Shut the interface(r1-r2-eth1, r1-r3-eth2) towards rp")
+
+ step("r1: Verify RP info")
+ dut = "r1"
+ rp_address = "1.0.2.17"
+ iif = "r1-r2-eth1"
+ result = verify_pim_rp_info(
+ tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r0: Send IGMP join")
+ result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify IGMP groups")
+ oif = "r1-r0-eth0"
+ result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes created")
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Verify (*, G) ip mroutes created")
+ dut = "r2"
+ iif = "lo"
+ oif = "r2-r1-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Delete RP configuration")
+
+ # Delete RP configuration
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r2: Shut the RP interface lo")
+ dut = "r2"
+ intf = "lo"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step("r1: Shut the interface r1-r2-eth1 towards RP")
+ dut = "r1"
+ intf = "r1-r2-eth1"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step("r1: Shut the interface r1-r3-eth2 towards RP")
+ dut = "r1"
+ intf = "r1-r3-eth2"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step("r1: Verify (*, G) ip mroutes cleared")
+ dut = "r1"
+ iif = "r1-r2-eth1"
+ oif = "r1-r0-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r2: Verify (*, G) ip mroutes cleared")
+ dut = "r2"
+ iif = "lo"
+ oif = "r2-r1-eth0"
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.dot b/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.dot
new file mode 100644
index 0000000000..2c6d0aab16
--- /dev/null
+++ b/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.dot
@@ -0,0 +1,107 @@
+## Color coding:
+#########################
+## Main FRR: #f08080 red
+## Switches: #d0e0d0 gray
+## RIP: #19e3d9 Cyan
+## RIPng: #fcb314 dark yellow
+## OSPFv2: #32b835 Green
+## OSPFv3: #19e3d9 Cyan
+## ISIS IPv4 #fcb314 dark yellow
+## ISIS IPv6 #9a81ec purple
+## BGP IPv4 #eee3d3 beige
+## BGP IPv6 #fdff00 yellow
+##### Colors (see http://www.color-hex.com/)
+
+graph ospf_dual_stack {
+ label="ospf dual stack";
+
+ # Routers
+ r1 [
+ label="r1\nrtr-id 1.1.1.1/32",
+ shape=doubleoctagon,
+ fillcolor="#f08080",
+ style=filled,
+ ];
+ r2 [
+ label="r2\nrtr-id 2.2.2.2/32",
+ shape=doubleoctagon,
+ fillcolor="#f08080",
+ style=filled,
+ ];
+ r3 [
+ label="r3\nrtr-id 3.3.3.3/32",
+ shape=doubleoctagon,
+ fillcolor="#f08080",
+ style=filled,
+ ];
+ r4 [
+ label="r4\nrtr-id 4.4.4.4/32",
+ shape=doubleoctagon,
+ fillcolor="#f08080",
+ style=filled,
+ ];
+ r5 [
+ label="r5\nrtr-id 5.5.5.5/32",
+ shape=doubleoctagon,
+ fillcolor="#f08080",
+ style=filled,
+ ];
+
+ # Switches
+ s1 [
+ label="s1\n10.0.13.0/24\n2013:13::/64",
+ shape=oval,
+ fillcolor="#d0e0d0",
+ style=filled,
+ ];
+ s2 [
+ label="s2\n10.0.23.0/24\n2023:23::/64",
+ shape=oval,
+ fillcolor="#d0e0d0",
+ style=filled,
+ ];
+ s3 [
+ label="s3\n10.0.34.0/24\n2034:34::/64",
+ shape=oval,
+ fillcolor="#d0e0d0",
+ style=filled,
+ ];
+ s4 [
+ label="s4\n10.0.24.0/24\n2024:24::/64",
+ shape=oval,
+ fillcolor="#d0e0d0",
+ style=filled,
+ ];
+ s5 [
+ label="s5\n10.0.45.0/24\n2045:45::/64",
+ shape=oval,
+ fillcolor="#d0e0d0",
+ style=filled,
+ ];
+
+ # Connections
+ subgraph cluster1 {
+ label="area 1.1.1.1"
+
+ r1 -- s1 [label="eth0\n.1\n::1"];
+ r3 -- s1 [label="eth0\n.3\n::3"];
+ r3 -- s2 [label="eth1\n.3\n::3"];
+ r2 -- s2 [label="eth0\n.2\n::2"];
+ }
+
+ subgraph cluster0 {
+ label="area 0.0.0.0"
+
+ r3 -- s3 [label="eth2\n.3\n::3"];
+ r4 -- s3 [label="eth0\n.4\n::4"];
+ r2 -- s4 [label="eth1\n.2\n::2"];
+ r4 -- s4 [label="eth1\n.4\n::4"];
+ }
+
+ subgraph cluster2 {
+ label="area 2.2.2.2"
+
+ r4 -- s5 [label="eth2\n.4\n::4"];
+ r5 -- s5 [label="eth0\n.5\n::5"];
+ }
+}
diff --git a/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.jpg b/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.jpg
new file mode 100644
index 0000000000..44efda8390
--- /dev/null
+++ b/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.jpg
Binary files differ
diff --git a/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.json b/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.json
new file mode 100644
index 0000000000..c8a3ce783b
--- /dev/null
+++ b/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.json
@@ -0,0 +1,255 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 24
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "r3": {
+ "ipv4": "10.0.13.1/24",
+ "ipv6": "2013:13::1/64",
+ "ospf": {
+ "area": "1.1.1.1",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ },
+ "ospf6": {
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf": {
+ "router_id": "1.1.1.1",
+ "neighbors": {
+ "r3": {}
+ }
+ },
+ "ospf6": {
+ "router_id": "1.1.1.1",
+ "neighbors": {
+ "r3": {
+ "area": "1.1.1.1"
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "r3": {
+ "ipv4": "10.0.23.2/24",
+ "ipv6": "2023:23::2/64",
+ "ospf": {
+ "area": "1.1.1.1",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ },
+ "ospf6": {
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ },
+ "r4": {
+ "ipv4": "10.0.24.2/24",
+ "ipv6": "2024:24::2/64",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ },
+ "ospf6": {
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ }
+ },
+ "ospf": {
+ "router_id": "2.2.2.2",
+ "neighbors": {
+ "r3": {},
+ "r4": {}
+ }
+ },
+ "ospf6": {
+ "router_id": "2.2.2.2",
+ "neighbors": {
+ "r3": { "area": "1.1.1.1" },
+ "r4": { "area": "0.0.0.0" }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "r1": {
+ "ipv4": "10.0.13.3/24",
+ "ipv6": "2013:13::3/64",
+ "ospf": {
+ "area": "1.1.1.1",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ },
+ "ospf6": {
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ },
+ "r2": {
+ "ipv4": "10.0.23.3/24",
+ "ipv6": "2023:23::3/64",
+ "ospf": {
+ "area": "1.1.1.1",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ },
+ "ospf6": {
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ },
+ "r4": {
+ "ipv4": "10.0.34.3/24",
+ "ipv6": "2034:34::3/64",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ },
+ "ospf6": {
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ }
+ },
+ "ospf": {
+ "router_id": "3.3.3.3",
+ "neighbors": {
+ "r1": {},
+ "r2": {},
+ "r4": {}
+ }
+ },
+ "ospf6": {
+ "router_id": "3.3.3.3",
+ "neighbors": {
+ "r1": { "area": "1.1.1.1" },
+ "r2": { "area": "1.1.1.1" },
+ "r4": { "area": "0.0.0.0" }
+ }
+ }
+ },
+ "r4": {
+ "links": {
+ "r2": {
+ "ipv4": "10.0.24.4/24",
+ "ipv6": "2024:24::4/64",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ },
+ "ospf6": {
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ },
+ "r3": {
+ "ipv4": "10.0.34.4/24",
+ "ipv6": "2034:34::4/64",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ },
+ "ospf6": {
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ },
+ "r5": {
+ "ipv4": "10.0.45.4/24",
+ "ipv6": "2045:45::4/64",
+ "ospf": {
+ "area": "2.2.2.2",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ },
+ "ospf6": {
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ }
+ },
+ "ospf": {
+ "router_id": "4.4.4.4",
+ "neighbors": {
+ "r2": {},
+ "r3": {},
+ "r5": {}
+ }
+ },
+ "ospf6": {
+ "router_id": "4.4.4.4",
+ "neighbors": {
+ "r2": { "area": "0.0.0.0" },
+ "r3": { "area": "0.0.0.0" },
+ "r5": { "area": "2.2.2.2" }
+ }
+ }
+ },
+ "r5": {
+ "links": {
+ "r4": {
+ "ipv4": "10.0.45.5/24",
+ "ipv6": "2045:45::5/64",
+ "ospf": {
+ "area": "2.2.2.2",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ },
+ "ospf6": {
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ }
+ },
+ "ospf": {
+ "router_id": "5.5.5.5",
+ "neighbors": {
+ "r4": {}
+ }
+ },
+ "ospf6": {
+ "router_id": "5.5.5.5",
+ "neighbors": {
+ "r4": { "area": "2.2.2.2" }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.py b/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.py
new file mode 100644
index 0000000000..5e7802fa04
--- /dev/null
+++ b/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+
+import os
+import sys
+import time
+import pytest
+import json
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+from mininet.topo import Topo
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ stop_router,
+ start_router,
+ verify_rib,
+ create_static_routes,
+ step,
+ start_router_daemons,
+ shutdown_bringup_interface,
+ topo_daemons,
+ create_prefix_lists,
+ create_interfaces_cfg,
+ run_frr_cmd,
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.ospf import (
+ verify_ospf_neighbor,
+ verify_ospf6_neighbor,
+ create_router_ospf,
+ create_router_ospf6,
+ verify_ospf_summary,
+ redistribute_ospf,
+ verify_ospf_database,
+)
+
+# Global variables
+topo = None
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/test_ospf_dual_stack.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+
+class CreateTopo(Topo):
+ """Test topology builder."""
+
+ def build(self, *_args, **_opts):
+ """Build function."""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """Sets up the pytest environment."""
+ global topo
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # get list of daemons needs to be started for this suite.
+ daemons = topo_daemons(tgen, topo)
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen, daemons)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Api call verify whether OSPF converged
+ ospf_covergence_ipv4 = verify_ospf_neighbor(tgen, topo)
+ assert ospf_covergence_ipv4 is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence_ipv4
+ )
+
+ # Api call verify whether OSPF6 converged
+ ospf_covergence_ipv6 = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_covergence_ipv6 is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence_ipv6
+ )
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment.
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+ tgen = get_topogen()
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#
+# ##################################
+# Test cases start here.
+# ##################################
+#
+#
+def test_ospf_dual_stack(request):
+ """OSPF test dual stack."""
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don't run this test if we have any failure.
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+
+ step("Bring up the base configuration as per the JSON topology")
+ reset_config_on_routers(tgen)
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospf-tilfa-topo1/__init__.py b/tests/topotests/ospf-tilfa-topo1/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/__init__.py
diff --git a/tests/topotests/ospf-tilfa-topo1/rt1/ospfd.conf b/tests/topotests/ospf-tilfa-topo1/rt1/ospfd.conf
new file mode 100644
index 0000000000..eaef49225f
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/rt1/ospfd.conf
@@ -0,0 +1,27 @@
+debug ospf sr
+debug ospf ti-lfa
+!
+interface lo
+!
+interface eth-rt2
+ ip ospf network point-to-point
+!
+interface eth-rt3
+ ip ospf network point-to-point
+!
+router ospf
+ ospf router-id 1.1.1.1
+ network 1.1.1.0/24 area 0.0.0.0
+ network 10.0.0.0/16 area 0.0.0.0
+ area 0.0.0.0 range 10.0.0.0/16
+ area 0.0.0.0 range 1.1.1.0/24
+ capability opaque
+ mpls-te on
+ mpls-te router-address 1.1.1.1
+ router-info area 0.0.0.0
+ passive-interface lo
+ segment-routing on
+ segment-routing global-block 16000 23999
+ segment-routing node-msd 8
+ segment-routing prefix 1.1.1.1/32 index 10
+!
diff --git a/tests/topotests/ospf-tilfa-topo1/rt1/step1/show_ip_route_initial.ref b/tests/topotests/ospf-tilfa-topo1/rt1/step1/show_ip_route_initial.ref
new file mode 100644
index 0000000000..0ad2aaeade
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/rt1/step1/show_ip_route_initial.ref
@@ -0,0 +1,156 @@
+{
+ "1.1.1.1\/32":[
+ {
+ "prefix":"1.1.1.1\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"lo"
+ }
+ ]
+ },
+ {
+ "prefix":"1.1.1.1\/32",
+ "protocol":"connected",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"lo"
+ }
+ ]
+ }
+ ],
+ "1.1.1.2\/32":[
+ {
+ "prefix":"1.1.1.2\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "1.1.1.3\/32":[
+ {
+ "prefix":"1.1.1.3\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "1.1.1.4\/32":[
+ {
+ "prefix":"1.1.1.4\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "1.1.1.5\/32":[
+ {
+ "prefix":"1.1.1.5\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "10.0.1.0\/24":[
+ {
+ "prefix":"10.0.1.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ },
+ {
+ "prefix":"10.0.1.0\/24",
+ "protocol":"connected",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "10.0.2.0\/24":[
+ {
+ "prefix":"10.0.2.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ },
+ {
+ "prefix":"10.0.2.0\/24",
+ "protocol":"connected",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "10.0.3.0\/24":[
+ {
+ "prefix":"10.0.3.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "10.0.4.0\/24":[
+ {
+ "prefix":"10.0.4.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "10.0.5.0\/24":[
+ {
+ "prefix":"10.0.5.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ },
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/ospf-tilfa-topo1/rt1/step2/show_ip_route_initial.ref b/tests/topotests/ospf-tilfa-topo1/rt1/step2/show_ip_route_initial.ref
new file mode 100644
index 0000000000..0ad2aaeade
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/rt1/step2/show_ip_route_initial.ref
@@ -0,0 +1,156 @@
+{
+ "1.1.1.1\/32":[
+ {
+ "prefix":"1.1.1.1\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"lo"
+ }
+ ]
+ },
+ {
+ "prefix":"1.1.1.1\/32",
+ "protocol":"connected",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"lo"
+ }
+ ]
+ }
+ ],
+ "1.1.1.2\/32":[
+ {
+ "prefix":"1.1.1.2\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "1.1.1.3\/32":[
+ {
+ "prefix":"1.1.1.3\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "1.1.1.4\/32":[
+ {
+ "prefix":"1.1.1.4\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "1.1.1.5\/32":[
+ {
+ "prefix":"1.1.1.5\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "10.0.1.0\/24":[
+ {
+ "prefix":"10.0.1.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ },
+ {
+ "prefix":"10.0.1.0\/24",
+ "protocol":"connected",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "10.0.2.0\/24":[
+ {
+ "prefix":"10.0.2.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ },
+ {
+ "prefix":"10.0.2.0\/24",
+ "protocol":"connected",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "10.0.3.0\/24":[
+ {
+ "prefix":"10.0.3.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "10.0.4.0\/24":[
+ {
+ "prefix":"10.0.4.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "10.0.5.0\/24":[
+ {
+ "prefix":"10.0.5.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ },
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/ospf-tilfa-topo1/rt1/step2/show_ip_route_link_protection.ref b/tests/topotests/ospf-tilfa-topo1/rt1/step2/show_ip_route_link_protection.ref
new file mode 100644
index 0000000000..968570e193
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/rt1/step2/show_ip_route_link_protection.ref
@@ -0,0 +1,226 @@
+{
+ "1.1.1.1\/32":[
+ {
+ "prefix":"1.1.1.1\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"lo"
+ }
+ ]
+ },
+ {
+ "prefix":"1.1.1.1\/32",
+ "protocol":"connected",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"lo"
+ }
+ ]
+ }
+ ],
+ "1.1.1.2\/32":[
+ {
+ "prefix":"1.1.1.2\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ }
+ ],
+ "backupNexthops":[
+ {
+ "ip":"10.0.2.2",
+ "labels":[
+ 16050
+ ]
+ }
+ ]
+ }
+ ],
+ "1.1.1.3\/32":[
+ {
+ "prefix":"1.1.1.3\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ],
+ "backupNexthops":[
+ {
+ "ip":"10.0.1.2",
+ "labels":[
+ 16040
+ ]
+ }
+ ]
+ }
+ ],
+ "1.1.1.4\/32":[
+ {
+ "prefix":"1.1.1.4\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2",
+ "labels":[
+ 16040
+ ]
+ }
+ ],
+ "backupNexthops":[
+ {
+ "ip":"10.0.2.2",
+ "labels":[
+ 16050,
+ 16040
+ ]
+ }
+ ]
+ }
+ ],
+ "1.1.1.5\/32":[
+ {
+ "prefix":"1.1.1.5\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3",
+ "labels":[
+ 16050
+ ]
+ }
+ ],
+ "backupNexthops":[
+ {
+ "ip":"10.0.1.2",
+ "labels":[
+ 16040,
+ 16050
+ ]
+ }
+ ]
+ }
+ ],
+ "10.0.1.0\/24":[
+ {
+ "prefix":"10.0.1.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ },
+ {
+ "prefix":"10.0.1.0\/24",
+ "protocol":"connected",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "10.0.2.0\/24":[
+ {
+ "prefix":"10.0.2.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ },
+ {
+ "prefix":"10.0.2.0\/24",
+ "protocol":"connected",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "10.0.3.0\/24":[
+ {
+ "prefix":"10.0.3.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ }
+ ],
+ "backupNexthops":[
+ {
+ "ip":"10.0.2.2",
+ "labels":[
+ 16050
+ ]
+ }
+ ]
+ }
+ ],
+ "10.0.4.0\/24":[
+ {
+ "prefix":"10.0.4.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ],
+ "backupNexthops":[
+ {
+ "ip":"10.0.1.2",
+ "labels":[
+ 16040
+ ]
+ }
+ ]
+ }
+ ],
+ "10.0.5.0\/24":[
+ {
+ "prefix":"10.0.5.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ },
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ],
+ "backupNexthops":[
+ {
+ "ip":"10.0.1.2",
+ "labels":[
+ 16040
+ ]
+ },
+ {
+ "ip":"10.0.2.2",
+ "labels":[
+ 16050
+ ]
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/ospf-tilfa-topo1/rt1/step3/show_ip_route_initial.ref b/tests/topotests/ospf-tilfa-topo1/rt1/step3/show_ip_route_initial.ref
new file mode 100644
index 0000000000..0ad2aaeade
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/rt1/step3/show_ip_route_initial.ref
@@ -0,0 +1,156 @@
+{
+ "1.1.1.1\/32":[
+ {
+ "prefix":"1.1.1.1\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"lo"
+ }
+ ]
+ },
+ {
+ "prefix":"1.1.1.1\/32",
+ "protocol":"connected",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"lo"
+ }
+ ]
+ }
+ ],
+ "1.1.1.2\/32":[
+ {
+ "prefix":"1.1.1.2\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "1.1.1.3\/32":[
+ {
+ "prefix":"1.1.1.3\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "1.1.1.4\/32":[
+ {
+ "prefix":"1.1.1.4\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "1.1.1.5\/32":[
+ {
+ "prefix":"1.1.1.5\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "10.0.1.0\/24":[
+ {
+ "prefix":"10.0.1.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ },
+ {
+ "prefix":"10.0.1.0\/24",
+ "protocol":"connected",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "10.0.2.0\/24":[
+ {
+ "prefix":"10.0.2.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ },
+ {
+ "prefix":"10.0.2.0\/24",
+ "protocol":"connected",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "10.0.3.0\/24":[
+ {
+ "prefix":"10.0.3.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "10.0.4.0\/24":[
+ {
+ "prefix":"10.0.4.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "10.0.5.0\/24":[
+ {
+ "prefix":"10.0.5.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ },
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/ospf-tilfa-topo1/rt1/step3/show_ip_route_node_protection.ref b/tests/topotests/ospf-tilfa-topo1/rt1/step3/show_ip_route_node_protection.ref
new file mode 100644
index 0000000000..46a80d298e
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/rt1/step3/show_ip_route_node_protection.ref
@@ -0,0 +1,192 @@
+{
+ "1.1.1.1\/32":[
+ {
+ "prefix":"1.1.1.1\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"lo"
+ }
+ ]
+ },
+ {
+ "prefix":"1.1.1.1\/32",
+ "protocol":"connected",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"lo"
+ }
+ ]
+ }
+ ],
+ "1.1.1.2\/32":[
+ {
+ "prefix":"1.1.1.2\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "1.1.1.3\/32":[
+ {
+ "prefix":"1.1.1.3\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "1.1.1.4\/32":[
+ {
+ "prefix":"1.1.1.4\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2",
+ "labels":[
+ 16040
+ ]
+ }
+ ],
+ "backupNexthops":[
+ {
+ "ip":"10.0.2.2",
+ "labels":[
+ 16050
+ ]
+ }
+ ]
+ }
+ ],
+ "1.1.1.5\/32":[
+ {
+ "prefix":"1.1.1.5\/32",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3",
+ "labels":[
+ 16050
+ ]
+ }
+ ],
+ "backupNexthops":[
+ {
+ "ip":"10.0.1.2",
+ "labels":[
+ 16040
+ ]
+ }
+ ]
+ }
+ ],
+ "10.0.1.0\/24":[
+ {
+ "prefix":"10.0.1.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ },
+ {
+ "prefix":"10.0.1.0\/24",
+ "protocol":"connected",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "10.0.2.0\/24":[
+ {
+ "prefix":"10.0.2.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ },
+ {
+ "prefix":"10.0.2.0\/24",
+ "protocol":"connected",
+ "nexthops":[
+ {
+ "directlyConnected":true,
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "10.0.3.0\/24":[
+ {
+ "prefix":"10.0.3.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ }
+ ]
+ }
+ ],
+ "10.0.4.0\/24":[
+ {
+ "prefix":"10.0.4.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ]
+ }
+ ],
+ "10.0.5.0\/24":[
+ {
+ "prefix":"10.0.5.0\/24",
+ "protocol":"ospf",
+ "nexthops":[
+ {
+ "ip":"10.0.1.2",
+ "interfaceName":"eth-rt2"
+ },
+ {
+ "ip":"10.0.2.2",
+ "interfaceName":"eth-rt3"
+ }
+ ],
+ "backupNexthops":[
+ {
+ "ip":"10.0.2.2",
+ "labels":[
+ 16050
+ ]
+ },
+ {
+ "ip":"10.0.1.2",
+ "labels":[
+ 16040
+ ]
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/ospf-tilfa-topo1/rt1/zebra.conf b/tests/topotests/ospf-tilfa-topo1/rt1/zebra.conf
new file mode 100644
index 0000000000..bf0e77a17b
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/rt1/zebra.conf
@@ -0,0 +1,17 @@
+log file zebra.log
+!
+hostname rt1
+!
+interface lo
+ ip address 1.1.1.1/32
+!
+interface eth-rt2
+ ip address 10.0.1.1/24
+!
+interface eth-rt3
+ ip address 10.0.2.1/24
+!
+ip forwarding
+!
+line vty
+!
diff --git a/tests/topotests/ospf-tilfa-topo1/rt2/ospfd.conf b/tests/topotests/ospf-tilfa-topo1/rt2/ospfd.conf
new file mode 100644
index 0000000000..7548aad7f8
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/rt2/ospfd.conf
@@ -0,0 +1,27 @@
+debug ospf sr
+debug ospf ti-lfa
+!
+interface lo
+!
+interface eth-rt1
+ ip ospf network point-to-point
+!
+interface eth-rt4
+ ip ospf network point-to-point
+!
+router ospf
+ ospf router-id 1.1.1.2
+ network 1.1.1.0/24 area 0.0.0.0
+ network 10.0.0.0/16 area 0.0.0.0
+ area 0.0.0.0 range 10.0.0.0/16
+ area 0.0.0.0 range 1.1.1.0/24
+ capability opaque
+ mpls-te on
+ mpls-te router-address 1.1.1.2
+ router-info area 0.0.0.0
+ passive-interface lo
+ segment-routing on
+ segment-routing global-block 16000 23999
+ segment-routing node-msd 8
+ segment-routing prefix 1.1.1.2/32 index 20
+!
diff --git a/tests/topotests/ospf-tilfa-topo1/rt2/zebra.conf b/tests/topotests/ospf-tilfa-topo1/rt2/zebra.conf
new file mode 100644
index 0000000000..add2933571
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/rt2/zebra.conf
@@ -0,0 +1,17 @@
+log file zebra.log
+!
+hostname rt2
+!
+interface lo
+ ip address 1.1.1.2/32
+!
+interface eth-rt1
+ ip address 10.0.1.2/24
+!
+interface eth-rt4
+ ip address 10.0.3.1/24
+!
+ip forwarding
+!
+line vty
+!
diff --git a/tests/topotests/ospf-tilfa-topo1/rt3/ospfd.conf b/tests/topotests/ospf-tilfa-topo1/rt3/ospfd.conf
new file mode 100644
index 0000000000..6258295b6f
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/rt3/ospfd.conf
@@ -0,0 +1,27 @@
+debug ospf sr
+debug ospf ti-lfa
+!
+interface lo
+!
+interface eth-rt1
+ ip ospf network point-to-point
+!
+interface eth-rt5
+ ip ospf network point-to-point
+!
+router ospf
+ ospf router-id 1.1.1.3
+ network 1.1.1.0/24 area 0.0.0.0
+ network 10.0.0.0/16 area 0.0.0.0
+ area 0.0.0.0 range 10.0.0.0/16
+ area 0.0.0.0 range 1.1.1.0/24
+ capability opaque
+ mpls-te on
+ mpls-te router-address 1.1.1.3
+ router-info area 0.0.0.0
+ passive-interface lo
+ segment-routing on
+ segment-routing global-block 16000 23999
+ segment-routing node-msd 8
+ segment-routing prefix 1.1.1.3/32 index 30
+!
diff --git a/tests/topotests/ospf-tilfa-topo1/rt3/zebra.conf b/tests/topotests/ospf-tilfa-topo1/rt3/zebra.conf
new file mode 100644
index 0000000000..1bb64bc585
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/rt3/zebra.conf
@@ -0,0 +1,17 @@
+log file zebra.log
+!
+hostname rt3
+!
+interface lo
+ ip address 1.1.1.3/32
+!
+interface eth-rt1
+ ip address 10.0.2.2/24
+!
+interface eth-rt5
+ ip address 10.0.4.1/24
+!
+ip forwarding
+!
+line vty
+!
diff --git a/tests/topotests/ospf-tilfa-topo1/rt4/ospfd.conf b/tests/topotests/ospf-tilfa-topo1/rt4/ospfd.conf
new file mode 100644
index 0000000000..ad02214017
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/rt4/ospfd.conf
@@ -0,0 +1,27 @@
+debug ospf sr
+debug ospf ti-lfa
+!
+interface lo
+!
+interface eth-rt2
+ ip ospf network point-to-point
+!
+interface eth-rt5
+ ip ospf network point-to-point
+!
+router ospf
+ ospf router-id 1.1.1.4
+ network 1.1.1.0/24 area 0.0.0.0
+ network 10.0.0.0/16 area 0.0.0.0
+ area 0.0.0.0 range 10.0.0.0/16
+ area 0.0.0.0 range 1.1.1.0/24
+ capability opaque
+ mpls-te on
+ mpls-te router-address 1.1.1.4
+ router-info area 0.0.0.0
+ passive-interface lo
+ segment-routing on
+ segment-routing global-block 16000 23999
+ segment-routing node-msd 8
+ segment-routing prefix 1.1.1.4/32 index 40
+!
diff --git a/tests/topotests/ospf-tilfa-topo1/rt4/zebra.conf b/tests/topotests/ospf-tilfa-topo1/rt4/zebra.conf
new file mode 100644
index 0000000000..306f0d4925
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/rt4/zebra.conf
@@ -0,0 +1,17 @@
+log file zebra.log
+!
+hostname rt4
+!
+interface lo
+ ip address 1.1.1.4/32
+!
+interface eth-rt2
+ ip address 10.0.3.2/24
+!
+interface eth-rt5
+ ip address 10.0.5.1/24
+!
+ip forwarding
+!
+line vty
+!
diff --git a/tests/topotests/ospf-tilfa-topo1/rt5/ospfd.conf b/tests/topotests/ospf-tilfa-topo1/rt5/ospfd.conf
new file mode 100644
index 0000000000..1b95858f53
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/rt5/ospfd.conf
@@ -0,0 +1,27 @@
+debug ospf sr
+debug ospf ti-lfa
+!
+interface lo
+!
+interface eth-rt3
+ ip ospf network point-to-point
+!
+interface eth-rt4
+ ip ospf network point-to-point
+!
+router ospf
+ ospf router-id 1.1.1.5
+ network 1.1.1.0/24 area 0.0.0.0
+ network 10.0.0.0/16 area 0.0.0.0
+ area 0.0.0.0 range 10.0.0.0/16
+ area 0.0.0.0 range 1.1.1.0/24
+ capability opaque
+ mpls-te on
+ mpls-te router-address 1.1.1.5
+ router-info area 0.0.0.0
+ passive-interface lo
+ segment-routing on
+ segment-routing global-block 16000 23999
+ segment-routing node-msd 8
+ segment-routing prefix 1.1.1.5/32 index 50
+!
diff --git a/tests/topotests/ospf-tilfa-topo1/rt5/zebra.conf b/tests/topotests/ospf-tilfa-topo1/rt5/zebra.conf
new file mode 100644
index 0000000000..46f759580e
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/rt5/zebra.conf
@@ -0,0 +1,17 @@
+log file zebra.log
+!
+hostname rt5
+!
+interface lo
+ ip address 1.1.1.5/32
+!
+interface eth-rt3
+ ip address 10.0.4.2/24
+!
+interface eth-rt4
+ ip address 10.0.5.2/24
+!
+ip forwarding
+!
+line vty
+!
diff --git a/tests/topotests/ospf-tilfa-topo1/test_ospf_tilfa_topo1.py b/tests/topotests/ospf-tilfa-topo1/test_ospf_tilfa_topo1.py
new file mode 100644
index 0000000000..eb3ad5d995
--- /dev/null
+++ b/tests/topotests/ospf-tilfa-topo1/test_ospf_tilfa_topo1.py
@@ -0,0 +1,242 @@
+#!/usr/bin/env python
+
+#
+# test_ospf_tilfa_topo1.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2020 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_ospf_tilfa_topo1.py:
+
+This topology is intentionally kept simple, its main purpose is to verify that
+generated backup label stacks are inserted correctly into the RIB. For fancy
+topologies please use the unit test framework provided in `/tests/ospfd`.
+
+
+ +---------+ +---------+
+ | | | |
+ 10.0.1.0/24 eth+rt1| RT2 |eth+rt4 eth+rt2| RT4 |
+ +---------------------+ 2.2.2.2 +---------------------+ 4.4.4.4 |
+ | | | 10.0.3.0/24 | |
+ |eth+rt2 +---------+ +---------+
+ +---------+ eth+rt5|
+ | | |
+ | RT1 | 10.0.5.0/24|
+ | 1.1.1.1 | |
+ | | |
+ +---------+ eth+rt4|
+ |eth+rt3 +---------+ +---------+
+ | | | 10.0.4.0/24 | |
+ +---------------------+ RT3 +---------------------+ RT5 |
+ 10.0.2.0/24 eth+rt1| 3.3.3.3 |eth+rt5 eth-rt3| 5.5.5.5 |
+ | | | |
+ +---------+ +---------+
+"""
+
+import os
+import sys
+import pytest
+import json
+import re
+from time import sleep
+from functools import partial
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+
+class TemplateTopo(Topo):
+ "Test topology builder"
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ #
+ # Define FRR Routers
+ #
+ for router in ["rt1", "rt2", "rt3", "rt4", "rt5"]:
+ tgen.add_router(router)
+
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1")
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-rt3")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt1")
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2")
+
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3")
+
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ # For all registered routers, load the zebra configuration file
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ # This function tears down the whole topology.
+ tgen.stop_topology()
+
+
+def router_compare_json_output(rname, command, reference):
+ "Compare router JSON output"
+
+ logger.info('Comparing router "%s" "%s" output', rname, command)
+
+ tgen = get_topogen()
+ filename = "{}/{}/{}".format(CWD, rname, reference)
+ expected = json.loads(open(filename).read())
+
+ # Run test function until we get an result. Wait at most 60 seconds.
+ test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected)
+ _, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5)
+ assertmsg = '"{}" JSON output mismatches the expected result'.format(rname)
+ assert diff is None, assertmsg
+
+
+def test_ospf_initial_convergence_step1():
+ logger.info("Test (step 1): check initial convergence")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router_compare_json_output(
+ "rt1",
+ "show ip route json",
+ "step1/show_ip_route_initial.ref",
+ )
+
+def test_ospf_link_protection_step2():
+ logger.info("Test (step 2): check OSPF link protection")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # enable TI-LFA link protection on all interfaces
+ tgen.net["rt1"].cmd(
+ 'vtysh -c "conf t" -c "router ospf" -c "fast-reroute ti-lfa"'
+ )
+
+ router_compare_json_output(
+ "rt1",
+ "show ip route json",
+ "step2/show_ip_route_link_protection.ref",
+ )
+
+ # disable TI-LFA link protection on all interfaces
+ tgen.net["rt1"].cmd(
+ 'vtysh -c "conf t" -c "router ospf" -c "no fast-reroute ti-lfa"'
+ )
+
+ # check if we got back to the initial route table
+ router_compare_json_output(
+ "rt1",
+ "show ip route json",
+ "step2/show_ip_route_initial.ref",
+ )
+
+def test_ospf_node_protection_step3():
+ logger.info("Test (step 3): check OSPF node protection")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # enable TI-LFA node protection on all interfaces
+ tgen.net["rt1"].cmd(
+ 'vtysh -c "conf t" -c "router ospf" -c "fast-reroute ti-lfa node-protection"'
+ )
+
+ router_compare_json_output(
+ "rt1",
+ "show ip route json",
+ "step3/show_ip_route_node_protection.ref",
+ )
+
+ # disable TI-LFA node protection on all interfaces
+ tgen.net["rt1"].cmd(
+ 'vtysh -c "conf t" -c "router ospf" -c "no fast-reroute ti-lfa node-protection"'
+ )
+
+ # check if we got back to the initial route table
+ router_compare_json_output(
+ "rt1",
+ "show ip route json",
+ "step3/show_ip_route_initial.ref",
+ )
+
+# Memory leak test template
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospf6-topo1/r2/ip_6_address.nhg.ref b/tests/topotests/ospf6-topo1/r2/ip_6_address.nhg.ref
new file mode 100644
index 0000000000..032acb5341
--- /dev/null
+++ b/tests/topotests/ospf6-topo1/r2/ip_6_address.nhg.ref
@@ -0,0 +1,10 @@
+fc00:1111:1111:1111::/64 nhid XXXX via fe80::__(r1-sw5)__ dev r2-sw5 proto XXXX metric 20 pref medium
+fc00:1:1:1::/64 nhid XXXX via fe80::__(r1-sw5)__ dev r2-sw5 proto XXXX metric 20 pref medium
+fc00:2222:2222:2222::/64 nhid XXXX via fc00:2:2:2::1234 dev r2-stubnet proto XXXX metric 20 pref medium
+fc00:2:2:2::/64 dev r2-stubnet proto XXXX metric 256 pref medium
+fc00:3333:3333:3333::/64 nhid XXXX via fe80::__(r3-sw5)__ dev r2-sw5 proto XXXX metric 20 pref medium
+fc00:3:3:3::/64 nhid XXXX via fe80::__(r3-sw5)__ dev r2-sw5 proto XXXX metric 20 pref medium
+fc00:4444:4444:4444::/64 nhid XXXX via fe80::__(r3-sw5)__ dev r2-sw5 proto XXXX metric 20 pref medium
+fc00:4:4:4::/64 nhid XXXX via fe80::__(r3-sw5)__ dev r2-sw5 proto XXXX metric 20 pref medium
+fc00:a:a:a::/64 dev r2-sw5 proto XXXX metric 256 pref medium
+fc00:b:b:b::/64 nhid XXXX via fe80::__(r3-sw5)__ dev r2-sw5 proto XXXX metric 20 pref medium
diff --git a/tests/topotests/ospf6-topo1/r3/ip_6_address.nhg.ref b/tests/topotests/ospf6-topo1/r3/ip_6_address.nhg.ref
new file mode 100644
index 0000000000..101fcc95b4
--- /dev/null
+++ b/tests/topotests/ospf6-topo1/r3/ip_6_address.nhg.ref
@@ -0,0 +1,10 @@
+fc00:1111:1111:1111::/64 nhid XXXX via fe80::__(r1-sw5)__ dev r3-sw5 proto XXXX metric 20 pref medium
+fc00:1:1:1::/64 nhid XXXX via fe80::__(r1-sw5)__ dev r3-sw5 proto XXXX metric 20 pref medium
+fc00:2222:2222:2222::/64 nhid XXXX via fe80::__(r2-sw5)__ dev r3-sw5 proto XXXX metric 20 pref medium
+fc00:2:2:2::/64 nhid XXXX via fe80::__(r2-sw5)__ dev r3-sw5 proto XXXX metric 20 pref medium
+fc00:3333:3333:3333::/64 nhid XXXX via fc00:3:3:3::1234 dev r3-stubnet proto XXXX metric 20 pref medium
+fc00:3:3:3::/64 dev r3-stubnet proto XXXX metric 256 pref medium
+fc00:4444:4444:4444::/64 nhid XXXX via fe80::__(r4-sw6)__ dev r3-sw6 proto XXXX metric 20 pref medium
+fc00:4:4:4::/64 nhid XXXX via fe80::__(r4-sw6)__ dev r3-sw6 proto XXXX metric 20 pref medium
+fc00:a:a:a::/64 dev r3-sw5 proto XXXX metric 256 pref medium
+fc00:b:b:b::/64 dev r3-sw6 proto XXXX metric 256 pref medium
diff --git a/tests/topotests/ospf6-topo1/r4/ip_6_address.nhg.ref b/tests/topotests/ospf6-topo1/r4/ip_6_address.nhg.ref
new file mode 100644
index 0000000000..4f11670ce3
--- /dev/null
+++ b/tests/topotests/ospf6-topo1/r4/ip_6_address.nhg.ref
@@ -0,0 +1,10 @@
+fc00:1111:1111:1111::/64 nhid XXXX via fe80::__(r3-sw6)__ dev r4-sw6 proto XXXX metric 20 pref medium
+fc00:1:1:1::/64 nhid XXXX via fe80::__(r3-sw6)__ dev r4-sw6 proto XXXX metric 20 pref medium
+fc00:2222:2222:2222::/64 nhid XXXX via fe80::__(r3-sw6)__ dev r4-sw6 proto XXXX metric 20 pref medium
+fc00:2:2:2::/64 nhid XXXX via fe80::__(r3-sw6)__ dev r4-sw6 proto XXXX metric 20 pref medium
+fc00:3333:3333:3333::/64 nhid XXXX via fe80::__(r3-sw6)__ dev r4-sw6 proto XXXX metric 20 pref medium
+fc00:3:3:3::/64 nhid XXXX via fe80::__(r3-sw6)__ dev r4-sw6 proto XXXX metric 20 pref medium
+fc00:4444:4444:4444::/64 nhid XXXX via fc00:4:4:4::1234 dev r4-stubnet proto XXXX metric 20 pref medium
+fc00:4:4:4::/64 dev r4-stubnet proto XXXX metric 256 pref medium
+fc00:a:a:a::/64 nhid XXXX via fe80::__(r3-sw6)__ dev r4-sw6 proto XXXX metric 20 pref medium
+fc00:b:b:b::/64 dev r4-sw6 proto XXXX metric 256 pref medium
diff --git a/tests/topotests/ospf6-topo1/test_ospf6_topo1.py b/tests/topotests/ospf6-topo1/test_ospf6_topo1.py
index 8e3a329f10..c3efb6ff22 100644
--- a/tests/topotests/ospf6-topo1/test_ospf6_topo1.py
+++ b/tests/topotests/ospf6-topo1/test_ospf6_topo1.py
@@ -383,7 +383,15 @@ def test_linux_ipv6_kernel_routingTable():
"Linux Kernel IPv6 Routing Table verification failed for router r%s:\n%s"
% (i, diff)
)
+ else:
+ logger.error(
+ "r{} failed - no nhid ref file: {}".format(i, refTableFile)
+ )
+ assert False, (
+ "Linux Kernel IPv6 Routing Table verification failed for router r%s\n"
+ % (i)
+ )
def test_shutdown_check_stderr():
diff --git a/tests/topotests/ospf_basic_functionality/ospf_chaos.json b/tests/topotests/ospf_basic_functionality/ospf_chaos.json
new file mode 100644
index 0000000000..ed199f181b
--- /dev/null
+++ b/tests/topotests/ospf_basic_functionality/ospf_chaos.json
@@ -0,0 +1,166 @@
+{
+
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 24
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32
+ },
+ "routers": {
+ "r0": {
+ "links": {
+ "r1": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf": {
+ "router_id": "100.1.1.0",
+ "neighbors": {
+ "r1": {},
+ "r2": {},
+ "r3": {}
+ },
+ "redistribute": [{
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "r1": {
+ "links": {
+ "r0": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf": {
+ "router_id": "100.1.1.1",
+ "neighbors": {
+ "r0": {},
+ "r2": {},
+ "r3": {}
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "r0": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf": {
+ "router_id": "100.1.1.2",
+ "neighbors": {
+ "r1": {},
+ "r0": {},
+ "r3": {}
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "r0": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf": {
+ "router_id": "100.1.1.3",
+ "neighbors": {
+ "r0": {},
+ "r1": {},
+ "r2": {}
+ }
+ }
+ }
+ }
+}
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py b/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py
new file mode 100644
index 0000000000..37b7528490
--- /dev/null
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py
@@ -0,0 +1,576 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2020 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""OSPF Basic Functionality Automation."""
+import os
+import sys
+import time
+import pytest
+from copy import deepcopy
+import json
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from mininet.topo import Topo
+from lib.topogen import Topogen, get_topogen
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ step,
+ shutdown_bringup_interface,
+ topo_daemons,
+ verify_rib,
+ stop_router, start_router,
+ create_static_routes,
+ start_router_daemons,
+ kill_router_daemons
+)
+
+from lib.ospf import (
+ verify_ospf_neighbor, verify_ospf_rib,
+ create_router_ospf)
+
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+from ipaddress import IPv4Address
+
+
+
+# Global variables
+topo = None
+
+NETWORK = {
+ "ipv4": ["11.0.20.1/32", "11.0.20.2/32", "11.0.20.3/32", "11.0.20.4/32",
+ "11.0.20.5/32"]
+}
+"""
+Topology:
+ Please view in a fixed-width font such as Courier.
+ +---+ A1 +---+
+ +R1 +------------+R2 |
+ +-+-+- +--++
+ | -- -- |
+ | -- A0 -- |
+ A0| ---- |
+ | ---- | A2
+ | -- -- |
+ | -- -- |
+ +-+-+- +-+-+
+ +R0 +-------------+R3 |
+ +---+ A3 +---+
+
+TESTCASES =
+1. Verify ospf functionality after restarting ospfd.
+2. Verify ospf functionality after restarting the FRR service.
+3. Verify ospf functionality when staticd is restarted.
+ """
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/ospf_chaos.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+class CreateTopo(Topo):
+ """
+ Test topology builder.
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function."""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+ global topo
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # get list of daemons needs to be started for this suite.
+ daemons = topo_daemons(tgen, topo)
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen, daemons)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ ospf_covergence = verify_ospf_neighbor(tgen, topo)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment.
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+# ##################################
+# Test cases start here.
+# ##################################
+def test_ospf_chaos_tc31_p1(request):
+ """Verify ospf functionality after restarting ospfd."""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ global topo
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step(
+ "Create static routes(10.0.20.1/32) in R1 and redistribute "
+ "to OSPF using route map.")
+
+ # Create Static routes
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK['ipv4'][0],
+ "no_of_ip": 5,
+ "next_hop": 'Null0',
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ ospf_red_r0 = {
+ "r0": {
+ "ospf": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step("Verify OSPF neighbors after base config is done.")
+ # Api call verify whether OSPF is converged
+ ospf_covergence = verify_ospf_neighbor(tgen, topo)
+ assert ospf_covergence is True, ("setup_module :Failed \n Error:"
+ " {}".format(ospf_covergence))
+
+ step("Verify that route is advertised to R1.")
+ dut = 'r1'
+ protocol = 'ospf'
+ nh = topo['routers']['r0']['links']['r1']['ipv4'].split('/')[0]
+ result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step("Kill OSPFd daemon on R0.")
+ kill_router_daemons(tgen, "r0", ["ospfd"])
+
+ step("Verify OSPF neighbors are down after killing ospfd in R0")
+ dut = 'r0'
+ # Api call verify whether OSPF is converged
+ ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut,
+ expected=False)
+ assert ospf_covergence is not True, ("setup_module :Failed \n Error:"
+ " {}".format(ospf_covergence))
+
+ step("Verify that route advertised to R1 are deleted from RIB and FIB.")
+ dut = 'r1'
+ protocol = 'ospf'
+ result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
+ expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step("Bring up OSPFd daemon on R0.")
+ start_router_daemons(tgen, "r0", ["ospfd"])
+
+ step("Verify OSPF neighbors are up after bringing back ospfd in R0")
+ # Api call verify whether OSPF is converged
+ ospf_covergence = verify_ospf_neighbor(tgen, topo)
+ assert ospf_covergence is True, ("setup_module :Failed \n Error:"
+ " {}".format(ospf_covergence))
+
+ step(
+ "All the neighbours are up and routes are installed before the"
+ " restart. Verify OSPF route table and ip route table.")
+ dut = 'r1'
+ protocol = 'ospf'
+ result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
+ next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step("Kill OSPFd daemon on R1.")
+ kill_router_daemons(tgen, "r1", ["ospfd"])
+
+ step("Verify OSPF neighbors are down after killing ospfd in R1")
+ dut = 'r1'
+ # Api call verify whether OSPF is converged
+ ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut,
+ expected=False)
+ assert ospf_covergence is not True, ("setup_module :Failed \n Error:"
+ " {}".format(ospf_covergence))
+
+ step("Bring up OSPFd daemon on R1.")
+ start_router_daemons(tgen, "r1", ["ospfd"])
+
+ step("Verify OSPF neighbors are up after bringing back ospfd in R1")
+ # Api call verify whether OSPF is converged
+ ospf_covergence = verify_ospf_neighbor(tgen, topo)
+ assert ospf_covergence is True, ("setup_module :Failed \n Error:"
+ " {}".format(ospf_covergence))
+
+ step(
+ "All the neighbours are up and routes are installed before the"
+ " restart. Verify OSPF route table and ip route table.")
+
+ dut = 'r1'
+ protocol = 'ospf'
+ result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
+ next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ospf_chaos_tc32_p1(request):
+ """Verify ospf functionality after restarting the FRR service."""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ global topo
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step(
+ "Create static routes(10.0.20.1/32) in R1 and redistribute "
+ "to OSPF using route map.")
+
+ # Create Static routes
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK['ipv4'][0],
+ "no_of_ip": 5,
+ "next_hop": 'Null0',
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ ospf_red_r0 = {
+ "r0": {
+ "ospf": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step("Verify OSPF neighbors after base config is done.")
+ # Api call verify whether OSPF is converged
+ ospf_covergence = verify_ospf_neighbor(tgen, topo)
+ assert ospf_covergence is True, ("setup_module :Failed \n Error:"
+ " {}".format(ospf_covergence))
+
+ step("Verify that route is advertised to R1.")
+ dut = 'r1'
+ protocol = 'ospf'
+
+ nh = topo['routers']['r0']['links']['r1']['ipv4'].split('/')[0]
+ result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
+ next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step("Restart frr on R0")
+ stop_router(tgen, 'r0')
+ start_router(tgen, 'r0')
+
+ step("Verify OSPF neighbors are up after restarting R0")
+ # Api call verify whether OSPF is converged
+ ospf_covergence = verify_ospf_neighbor(tgen, topo)
+ assert ospf_covergence is True, ("setup_module :Failed \n Error:"
+ " {}".format(ospf_covergence))
+
+ step(
+ "All the neighbours are up and routes are installed before the"
+ " restart. Verify OSPF route table and ip route table.")
+ dut = 'r1'
+ protocol = 'ospf'
+ result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
+ next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step("Restart frr on R1")
+ stop_router(tgen, 'r1')
+ start_router(tgen, 'r1')
+
+ step("Verify OSPF neighbors are up after restarting R1")
+ # Api call verify whether OSPF is converged
+ ospf_covergence = verify_ospf_neighbor(tgen, topo)
+ assert ospf_covergence is True, ("setup_module :Failed \n Error:"
+ " {}".format(ospf_covergence))
+
+ step(
+ "All the neighbours are up and routes are installed before the"
+ " restart. Verify OSPF route table and ip route table.")
+ dut = 'r1'
+ protocol = 'ospf'
+ result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
+ next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ospf_chaos_tc34_p1(request):
+ """
+ verify ospf functionality when staticd is restarted.
+
+ Verify ospf functionality when static routes are
+ redistributed & staticd is restarted.
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ global topo
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step(
+ "Create static routes(10.0.20.1/32) in R1 and redistribute "
+ "to OSPF using route map.")
+
+ # Create Static routes
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK['ipv4'][0],
+ "no_of_ip": 5,
+ "next_hop": 'Null0',
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ ospf_red_r0 = {
+ "r0": {
+ "ospf": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step("Verify OSPF neighbors after base config is done.")
+ # Api call verify whether OSPF is converged
+ ospf_covergence = verify_ospf_neighbor(tgen, topo)
+ assert ospf_covergence is True, ("setup_module :Failed \n Error:"
+ " {}".format(ospf_covergence))
+
+ step("Verify that route is advertised to R1.")
+ dut = 'r1'
+ protocol = 'ospf'
+ nh = topo['routers']['r0']['links']['r1']['ipv4'].split('/')[0]
+ result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
+ next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step("Kill staticd daemon on R0.")
+ kill_router_daemons(tgen, "r0", ["staticd"])
+
+ step("Verify that route advertised to R1 are deleted from RIB and FIB.")
+ dut = 'r1'
+ protocol = 'ospf'
+ result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
+ expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step("Bring up staticd daemon on R0.")
+ start_router_daemons(tgen, "r0", ["staticd"])
+
+ step("Verify OSPF neighbors are up after bringing back ospfd in R0")
+ # Api call verify whether OSPF is converged
+ ospf_covergence = verify_ospf_neighbor(tgen, topo)
+ assert ospf_covergence is True, ("setup_module :Failed \n Error:"
+ " {}".format(ospf_covergence))
+
+ step(
+ "All the neighbours are up and routes are installed before the"
+ " restart. Verify OSPF route table and ip route table.")
+ dut = 'r1'
+ protocol = 'ospf'
+ result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
+ next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step("Kill staticd daemon on R1.")
+ kill_router_daemons(tgen, "r1", ["staticd"])
+
+ step("Bring up staticd daemon on R1.")
+ start_router_daemons(tgen, "r1", ["staticd"])
+
+ step("Verify OSPF neighbors are up after bringing back ospfd in R1")
+ # Api call verify whether OSPF is converged
+ ospf_covergence = verify_ospf_neighbor(tgen, topo)
+ assert ospf_covergence is True, ("setup_module :Failed \n Error:"
+ " {}".format(ospf_covergence))
+
+ step(
+ "All the neighbours are up and routes are installed before the"
+ " restart. Verify OSPF route table and ip route table.")
+
+ dut = 'r1'
+ protocol = 'ospf'
+ result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
+ next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/pim-basic/test_pim.py b/tests/topotests/pim-basic/test_pim.py
index 74a7fbf16e..224b82f1fb 100644
--- a/tests/topotests/pim-basic/test_pim.py
+++ b/tests/topotests/pim-basic/test_pim.py
@@ -31,6 +31,8 @@ import pytest
import json
from functools import partial
+pytestmark = pytest.mark.pimd
+
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
@@ -80,7 +82,7 @@ class PIMTopo(Topo):
sw.add_link(tgen.gears["r1"])
sw.add_link(tgen.gears["r3"])
-@pytest.mark.pim
+
def setup_module(mod):
"Sets up the pytest environment"
tgen = Topogen(PIMTopo, mod.__name__)
diff --git a/tests/topotests/pytest.ini b/tests/topotests/pytest.ini
index d1b18a57bb..0c45a09445 100644
--- a/tests/topotests/pytest.ini
+++ b/tests/topotests/pytest.ini
@@ -1,16 +1,29 @@
# Skip pytests example directory
[pytest]
norecursedirs = .git example-test example-topojson-test lib docker
+
+# Markers
+#
+# Please consult the documentation and discuss with TSC members before applying
+# any changes to this list.
markers =
- babel: Tests that run against BABEL
- bfd: Tests that run against BFDD
- eigrp: Tests that run against EIGRPD
- isis: Tests that run against ISISD
- ldp: Tests that run against LDPD
- ospf: Tests that run against OSPF( v2 and v3 )
- pbr: Tests that run against PBRD
- pim: Tests that run against pim
- rip: Tests that run against RIP, both v4 and v6
+ babeld: Tests that run against BABELD
+ bfdd: Tests that run against BFDD
+ bgpd: Tests that run against BGPD
+ eigrpd: Tests that run against EIGRPD
+ isisd: Tests that run against ISISD
+ ldpd: Tests that run against LDPD
+ nhrpd: Tests that run against NHRPD
+ ospf6d: Tests that run against OSPF6D
+ ospfd: Tests that run against OSPFD
+ pathd: Tests that run against PATHD
+ pbrd: Tests that run against PBRD
+ pimd: Tests that run against PIMD
+ ripd: Tests that run against RIPD
+ ripngd: Tests that run against RIPNGD
+ sharpd: Tests that run against SHARPD
+ staticd: Tests that run against STATICD
+ vrrpd: Tests that run against VRRPD
[topogen]
# Default configuration values
diff --git a/tests/topotests/simple-snmp-test/r1/bgpd.conf b/tests/topotests/simple-snmp-test/r1/bgpd.conf
new file mode 100644
index 0000000000..00d1e17670
--- /dev/null
+++ b/tests/topotests/simple-snmp-test/r1/bgpd.conf
@@ -0,0 +1,6 @@
+log file /tmp/bgpd.log debugging
+!
+router bgp 100
+ bgp router-id 1.1.1.1
+
+agentx
diff --git a/tests/topotests/simple-snmp-test/r1/isisd.conf b/tests/topotests/simple-snmp-test/r1/isisd.conf
new file mode 100644
index 0000000000..b5ca993da3
--- /dev/null
+++ b/tests/topotests/simple-snmp-test/r1/isisd.conf
@@ -0,0 +1,46 @@
+log stdout debugging
+!
+debug isis route-events
+debug isis events
+!
+interface r1-eth0
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ no isis hello padding
+ isis hello-interval 1
+ isis hello-multiplier 3
+ isis network point-to-point
+!
+interface r1-eth1
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ no isis hello padding
+ isis hello-interval 1
+ isis hello-multiplier 3
+ isis network point-to-point
+!
+interface r1-eth2
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ no isis hello padding
+ isis hello-interval 1
+ isis hello-multiplier 3
+ isis network point-to-point
+!
+interface lo
+ ip router isis ISIS1
+ ipv6 router isis ISIS1
+ isis circuit-type level-1
+ isis passive
+ no isis hello padding
+!
+router isis ISIS1
+ net 01.1111.0000.0000.0001.00
+ is-type level-1
+ topology ipv6-unicast
+!
+line vty
+!
diff --git a/tests/topotests/simple-snmp-test/r1/snmpd.conf b/tests/topotests/simple-snmp-test/r1/snmpd.conf
new file mode 100644
index 0000000000..b37911da36
--- /dev/null
+++ b/tests/topotests/simple-snmp-test/r1/snmpd.conf
@@ -0,0 +1,15 @@
+agentAddress udp:1.1.1.1:161
+
+com2sec public 1.1.1.1 public
+
+group public_group v1 public
+group public_group v2c public
+
+access public_group "" any noauth prefix all all none
+
+view all included .1
+
+iquerySecName frr
+rouser frr
+
+master agentx
diff --git a/tests/topotests/simple-snmp-test/r1/zebra.conf b/tests/topotests/simple-snmp-test/r1/zebra.conf
new file mode 100644
index 0000000000..5281d0055d
--- /dev/null
+++ b/tests/topotests/simple-snmp-test/r1/zebra.conf
@@ -0,0 +1,22 @@
+log file zebra.log
+!
+interface r1-eth0
+ ip address 192.168.12.12/24
+ ipv6 address 2000:1:1:12::12/64
+!
+interface r1-eth1
+ ip address 192.168.13.13/24
+ ipv6 address 2000:1:1:13::13/64
+!
+interface r1-eth2
+ ip address 192.168.14.14/24
+ ipv6 address 2000:1:1:14::14/64
+!
+!
+interface lo
+ ip address 1.1.1.1/32
+ ipv6 address 2000:1:1:1::1/128
+!
+!
+!
+line vty
diff --git a/tests/topotests/simple-snmp-test/test_simple_snmp.py b/tests/topotests/simple-snmp-test/test_simple_snmp.py
new file mode 100755
index 0000000000..88ff01bf0a
--- /dev/null
+++ b/tests/topotests/simple-snmp-test/test_simple_snmp.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python
+
+#
+# test_simple_snmp.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2020 by Volta Networks
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_simple_snmp.py: Test SNMP infrastructure.
+"""
+
+import os
+import sys
+import json
+from functools import partial
+from time import sleep
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from lib.snmptest import SnmpTester
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+
+class TemplateTopo(Topo):
+ "Test topology builder"
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ # This function's only purpose is to define the allocation of and relationships
+ # between routers, switches and hosts.
+ #
+ #
+ # Create routers
+ tgen.add_router("r1")
+
+ # r1-eth0
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+
+ # r1-eth1
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+
+ # r1-eth2
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r1"])
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+
+ # skip tests if SNMP is not installed
+ if not os.path.isfile("/usr/sbin/snmpd"):
+ error_msg = "SNMP not installed - skipping"
+ pytest.skip(error_msg)
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+ tgen.start_topology()
+
+ r1 = tgen.gears["r1"]
+
+ router_list = tgen.routers()
+
+ # For all registered routers, load the zebra configuration file
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP,
+ os.path.join(CWD, "{}/bgpd.conf".format(rname)),
+ "-M snmp",
+ )
+ router.load_config(
+ TopoRouter.RD_SNMP,
+ os.path.join(CWD, "{}/snmpd.conf".format(rname)),
+ "-Le -Ivacm_conf,usmConf,iquery -V -DAgentX,trap",
+ )
+
+ # After loading the configurations, this function loads configured daemons.
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ # This function tears down the whole topology.
+ tgen.stop_topology()
+
+
+def test_r1_bgp_version():
+ "Wait for protocol convergence"
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # tgen.mininet_cli()
+ r1 = tgen.net.get("r1")
+ r1_snmp = SnmpTester(r1, "1.1.1.1", "public", "2c")
+ assert r1_snmp.test_oid("bgpVersin", None)
+ assert r1_snmp.test_oid("bgpVersion", "10")
+ assert r1_snmp.test_oid_walk("bgpVersion", ["10"])
+ assert r1_snmp.test_oid_walk("bgpVersion", ["10"], ["0"])
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/zebra_rib/r1/iproute.ref b/tests/topotests/zebra_rib/r1/iproute.ref
new file mode 100644
index 0000000000..b28182c2d1
--- /dev/null
+++ b/tests/topotests/zebra_rib/r1/iproute.ref
@@ -0,0 +1,512 @@
+4.5.1.0/24 via 192.168.210.2 dev r1-eth0 metric 4278198272
+4.5.2.0/24 via 192.168.211.2 dev r1-eth1 metric 16777217
+4.5.3.0/24 via 192.168.212.2 dev r1-eth2 metric 167772161
+4.5.3.0/24 via 192.168.213.2 dev r1-eth3 metric 2684354561
+10.0.0.0 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.1 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.2 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.3 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.4 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.5 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.6 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.7 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.8 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.9 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.10 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.11 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.12 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.13 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.14 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.15 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.16 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.17 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.18 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.19 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.20 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.21 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.22 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.23 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.24 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.25 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.26 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.27 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.28 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.29 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.30 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.31 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.32 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.33 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.34 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.35 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.36 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.37 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.38 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.39 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.40 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.41 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.42 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.43 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.45 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.46 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.47 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.48 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.49 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.50 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.51 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.52 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.53 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.54 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.55 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.56 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.57 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.58 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.59 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.60 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.61 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.62 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.63 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.64 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.65 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.66 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.67 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.68 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.69 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.70 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.71 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.72 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.73 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.74 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.75 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.76 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.77 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.78 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.79 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.80 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.81 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.82 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.83 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.84 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.85 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.86 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.87 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.88 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.89 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.90 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.91 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.92 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.93 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.94 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.95 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.96 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.97 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.98 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.99 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.100 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.101 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.102 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.103 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.104 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.105 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.106 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.107 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.108 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.109 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.110 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.111 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.112 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.113 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.114 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.115 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.116 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.117 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.118 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.119 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.120 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.121 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.122 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.123 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.124 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.125 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.126 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.127 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.128 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.129 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.130 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.131 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.132 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.133 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.134 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.135 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.136 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.137 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.138 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.139 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.140 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.141 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.142 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.143 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.144 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.145 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.146 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.147 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.148 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.149 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.150 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.151 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.152 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.153 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.154 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.155 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.156 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.157 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.158 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.159 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.160 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.161 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.162 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.163 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.164 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.165 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.166 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.167 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.168 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.169 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.170 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.171 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.172 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.173 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.174 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.175 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.176 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.177 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.178 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.179 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.180 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.181 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.182 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.183 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.184 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.185 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.186 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.187 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.188 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.189 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.190 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.191 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.192 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.193 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.194 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.195 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.196 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.197 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.198 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.199 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.200 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.201 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.202 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.203 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.204 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.205 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.206 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.207 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.208 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.209 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.210 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.211 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.212 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.213 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.214 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.215 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.216 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.217 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.218 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.219 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.220 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.221 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.222 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.223 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.224 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.225 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.226 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.227 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.228 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.229 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.230 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.231 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.232 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.233 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.234 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.235 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.236 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.237 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.238 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.239 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.240 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.241 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.242 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.243 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.244 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.245 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.246 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.247 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.248 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.249 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.250 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.251 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.252 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.253 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.254 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.0.255 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20
+10.0.1.0 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.1 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.2 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.3 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.4 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.5 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.6 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.7 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.8 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.9 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.10 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.11 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.12 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.13 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.14 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.15 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.16 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.17 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.18 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.19 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.20 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.21 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.22 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.23 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.24 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.25 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.26 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.27 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.28 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.29 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.30 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.31 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.32 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.33 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.34 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.35 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.36 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.37 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.38 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.39 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.40 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.41 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.42 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.43 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.44 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.45 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.46 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.47 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.48 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.49 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.50 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.51 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.52 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.53 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.54 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.55 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.56 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.57 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.58 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.59 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.60 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.61 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.62 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.63 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.64 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.65 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.66 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.67 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.68 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.69 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.70 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.71 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.72 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.73 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.74 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.75 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.76 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.77 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.78 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.79 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.80 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.81 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.82 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.83 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.84 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.85 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.86 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.87 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.88 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.89 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.90 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.91 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.92 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.93 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.94 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.95 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.96 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.97 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.98 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.99 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.100 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.101 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.102 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.103 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.104 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.105 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.106 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.107 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.108 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.109 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.110 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.111 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.112 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.113 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.114 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.115 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.116 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.117 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.118 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.119 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.120 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.121 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.122 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.123 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.124 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.125 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.126 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.127 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.128 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.129 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.130 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.131 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.132 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.133 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.134 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.135 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.136 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.137 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.138 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.139 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.140 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.141 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.142 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.143 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.144 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.145 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.146 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.147 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.148 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.149 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.150 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.151 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.152 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.153 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.154 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.155 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.156 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.157 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.158 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.159 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.160 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.161 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.162 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.163 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.164 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.165 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.166 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.167 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.168 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.169 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.170 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.171 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.172 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.173 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.174 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.175 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.176 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.177 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.178 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.179 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.180 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.181 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.182 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.183 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.184 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.185 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.186 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.187 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.188 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.189 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.190 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.191 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.192 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.193 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.194 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.195 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.196 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.197 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.198 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.199 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.200 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.201 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.202 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.203 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.204 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.205 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.206 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.207 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.208 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.209 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.210 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.211 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.212 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.213 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.214 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.215 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.216 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.217 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.218 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.219 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.220 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.221 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.222 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.223 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.224 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.225 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.226 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.227 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.228 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.229 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.230 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.231 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.232 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.233 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.234 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.235 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.236 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.237 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.238 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.239 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.240 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.241 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.242 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.0.1.243 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20
+10.100.100.100 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.215.1 metric 20
+192.168.210.0/24 dev r1-eth0 proto XXXX scope link src 192.168.210.1
+192.168.211.0/24 dev r1-eth1 proto XXXX scope link src 192.168.211.1
+192.168.212.0/24 dev r1-eth2 proto XXXX scope link src 192.168.212.1
+192.168.213.0/24 dev r1-eth3 proto XXXX scope link src 192.168.213.1
+192.168.214.0/24 dev r1-eth4 proto XXXX scope link src 192.168.214.1
+192.168.215.0/24 dev r1-eth5 proto XXXX scope link src 192.168.215.1
+192.168.216.0/24 dev r1-eth6 proto XXXX scope link src 192.168.216.1
+192.168.217.0/24 dev r1-eth7 proto XXXX scope link src 192.168.217.1
diff --git a/tests/topotests/zebra_rib/r1/sharp_rmap.ref b/tests/topotests/zebra_rib/r1/sharp_rmap.ref
new file mode 100644
index 0000000000..47a9eb6a49
--- /dev/null
+++ b/tests/topotests/zebra_rib/r1/sharp_rmap.ref
@@ -0,0 +1,17 @@
+ZEBRA:
+route-map: sharp Invoked: 500 Optimization: enabled Processed Change: false
+ permit, sequence 10 Invoked 244
+ Match clauses:
+ ip address 10
+ Set clauses:
+ src 192.168.214.1
+ Call clause:
+ Action:
+ Exit routemap
+ permit, sequence 20 Invoked 256
+ Match clauses:
+ Set clauses:
+ src 192.168.213.1
+ Call clause:
+ Action:
+ Exit routemap
diff --git a/tests/topotests/zebra_rib/r1/static_rmap.ref b/tests/topotests/zebra_rib/r1/static_rmap.ref
new file mode 100644
index 0000000000..2de98bd514
--- /dev/null
+++ b/tests/topotests/zebra_rib/r1/static_rmap.ref
@@ -0,0 +1,9 @@
+ZEBRA:
+route-map: static Invoked: 2 Optimization: enabled Processed Change: false
+ permit, sequence 10 Invoked 2
+ Match clauses:
+ Set clauses:
+ src 192.168.215.1
+ Call clause:
+ Action:
+ Exit routemap
diff --git a/tests/topotests/zebra_rib/test_zebra_rib.py b/tests/topotests/zebra_rib/test_zebra_rib.py
index ef4b597206..daf8f7be20 100644
--- a/tests/topotests/zebra_rib/test_zebra_rib.py
+++ b/tests/topotests/zebra_rib/test_zebra_rib.py
@@ -41,6 +41,7 @@ sys.path.append(os.path.join(CWD, "../"))
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
+from time import sleep
# Required to instantiate the topology builder class.
from mininet.topo import Topo
@@ -75,8 +76,9 @@ def setup_module(mod):
router_list = tgen.routers()
for rname, router in router_list.items():
router.load_config(
- TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
- )
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)))
+ router.load_config(
+ TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname)))
# Initialize all routers.
tgen.start_router()
@@ -157,6 +159,111 @@ def test_zebra_kernel_override():
_, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5)
assert result is None, '"r1" JSON output mismatches'
+def test_route_map_usage():
+ "Test that FRR only reruns over routes associated with the routemap"
+ logger.info("Test that FRR runs on selected re's on route-map changes")
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip("Skipped because of previous test failure")
+
+ thisDir = os.path.dirname(os.path.realpath(__file__))
+
+ r1 = tgen.gears["r1"]
+ # set the delay timer to 1 to improve test coverage (HA)
+ r1.vtysh_cmd("conf\nzebra route-map delay-timer 1")
+ r1.vtysh_cmd("conf\nroute-map static permit 10\nset src 192.168.215.1")
+ r1.vtysh_cmd("conf\naccess-list 5 seq 5 permit 10.0.0.44/32")
+ r1.vtysh_cmd("conf\naccess-list 10 seq 5 permit 10.0.1.0/24")
+ r1.vtysh_cmd("conf\nroute-map sharp permit 10\nmatch ip address 10\nset src 192.168.214.1")
+ r1.vtysh_cmd("conf\nroute-map sharp permit 20\nset src 192.168.213.1")
+ r1.vtysh_cmd("conf\nip protocol static route-map static")
+ r1.vtysh_cmd("conf\nip protocol sharp route-map sharp")
+ sleep(4)
+ r1.vtysh_cmd("conf\nip route 10.100.100.100/32 192.168.216.3")
+ r1.vtysh_cmd("conf\nip route 10.100.100.101/32 10.0.0.44")
+ r1.vtysh_cmd("sharp install route 10.0.0.0 nexthop 192.168.216.3 500")
+ sleep(4)
+
+ static_rmapfile = "%s/r1/static_rmap.ref" % (thisDir)
+ expected = open(static_rmapfile).read().rstrip()
+ expected = ('\n'.join(expected.splitlines()) + '\n').rstrip()
+ actual = r1.vtysh_cmd("show route-map static")
+ actual = ('\n'.join(actual.splitlines()) + '\n').rstrip()
+ logger.info("Does the show route-map static command run the correct number of times")
+
+ diff = topotest.get_textdiff(actual, expected,
+ title1 = "Actual Route-map output",
+ title2 = "Expected Route-map output")
+ if diff:
+ logger.info("Actual:")
+ logger.info(actual)
+ logger.info("Expected:")
+ logger.info(expected)
+ srun = r1.vtysh_cmd("show run")
+ srun = ('\n'.join(srun.splitlines()) + '\n').rstrip()
+ logger.info("Show run")
+ logger.info(srun)
+ assert 0, "r1 static route processing:\n"
+
+ sharp_rmapfile = "%s/r1/sharp_rmap.ref" % (thisDir)
+ expected = open(sharp_rmapfile).read().rstrip()
+ expected = ('\n'.join(expected.splitlines()) + '\n').rstrip()
+ actual = r1.vtysh_cmd("show route-map sharp")
+ actual = ('\n'.join(actual.splitlines()) + '\n').rstrip()
+ logger.info("Does the show route-map sharp command run the correct number of times")
+
+ diff = topotest.get_textdiff(actual, expected,
+ title1 = "Actual Route-map output",
+ title2 = "Expected Route-map output")
+ if diff:
+ logger.info("Actual:")
+ logger.info(actual)
+ logger.info("Expected:")
+ logger.info(expected)
+ srun = r1.vtysh_cmd("show run")
+ srun = ('\n'.join(srun.splitlines()) + '\n').rstrip()
+ logger.info("Show run:")
+ logger.info(srun)
+ assert 0, "r1 sharp route-map processing:\n"
+
+ logger.info("Add a extension to the static route-map to see the static route go away")
+ r1.vtysh_cmd("conf\nroute-map sharp deny 5\nmatch ip address 5")
+ sleep(2)
+    # we are only checking the kernel here, as this will give us implied
+    # testing of both the route-map and staticd withdrawing the route
+ # let's spot check that the routes were installed correctly
+ # in the kernel
+ logger.info("Test that the routes installed are correct")
+ sharp_ipfile = "%s/r1/iproute.ref" % (thisDir)
+ expected = open(sharp_ipfile).read().rstrip()
+ expected = ('\n'.join(expected.splitlines()) + '\n').rstrip()
+ actual = r1.run("ip route show")
+ actual = ('\n'.join(actual.splitlines()) + '\n').rstrip()
+ actual = re.sub(r" nhid [0-9][0-9]", "", actual)
+ actual = re.sub(r" proto sharp", " proto XXXX", actual)
+ actual = re.sub(r" proto static", " proto XXXX", actual)
+ actual = re.sub(r" proto 194", " proto XXXX", actual)
+ actual = re.sub(r" proto 196", " proto XXXX", actual)
+ actual = re.sub(r" proto kernel", " proto XXXX", actual)
+ actual = re.sub(r" proto 2", " proto XXXX", actual)
+ # Some platforms have double spaces? Why??????
+ actual = re.sub(r" proto XXXX ", " proto XXXX ", actual)
+ actual = re.sub(r" metric", " metric", actual)
+ actual = re.sub(r" link ", " link ", actual)
+ diff = topotest.get_textdiff(actual, expected,
+ title1 = "Actual ip route show",
+ title2 = "Expected ip route show")
+
+ if diff:
+ logger.info("Actual:")
+ logger.info(actual)
+ logger.info("Expected:")
+ logger.info(expected)
+ srun = r1.vtysh_cmd("show run")
+ srun = ('\n'.join(srun.splitlines()) + '\n').rstrip()
+ logger.info("Show run:")
+ logger.info(srun)
+ assert 0, "r1 ip route show is not correct:"
def test_memory_leak():
"Run the memory leak test and report results."
diff --git a/tools/.gitignore b/tools/.gitignore
index 85dae7fd36..63a5b61c35 100644
--- a/tools/.gitignore
+++ b/tools/.gitignore
@@ -6,3 +6,5 @@
/watchfrr.sh
/frrinit.sh
/frrcommon.sh
+/frr.service
+/frr@.service
diff --git a/tools/checkpatch.pl b/tools/checkpatch.pl
index c0624d933e..cf15d00796 100755
--- a/tools/checkpatch.pl
+++ b/tools/checkpatch.pl
@@ -5300,8 +5300,8 @@ sub process {
# uncoalesced string fragments
if ($line =~ /$String\s*"/) {
- WARN("STRING_FRAGMENTS",
- "Consecutive strings are generally better as a single string\n" . $herecurr);
+ CHK("STRING_FRAGMENTS",
+ "Consecutive strings are generally better as a single string\n" . $herecurr);
}
# check for non-standard and hex prefixed decimal printf formats
diff --git a/tools/frr-reload.py b/tools/frr-reload.py
index 0aa5f1e516..dca877dbfe 100755
--- a/tools/frr-reload.py
+++ b/tools/frr-reload.py
@@ -580,6 +580,18 @@ end
if line.startswith("!") or line.startswith("#"):
continue
+ if (len(ctx_keys) == 2
+ and ctx_keys[0].startswith('bfd')
+ and ctx_keys[1].startswith('profile ')
+ and line == 'end'):
+ log.debug('LINE %-50s: popping from sub context, %-50s', line, ctx_keys)
+
+ if main_ctx_key:
+ self.save_contexts(ctx_keys, current_context_lines)
+ ctx_keys = copy.deepcopy(main_ctx_key)
+ current_context_lines = []
+ continue
+
# one line contexts
# there is one exception though: ldpd accepts a 'router-id' clause
# as part of its 'mpls ldp' config context. If we are processing
@@ -882,6 +894,22 @@ end
)
ctx_keys.append(line)
+ elif (
+ line.startswith('profile ')
+ and len(ctx_keys) == 1
+ and ctx_keys[0].startswith('bfd')
+ ):
+
+ # Save old context first
+ self.save_contexts(ctx_keys, current_context_lines)
+ current_context_lines = []
+ main_ctx_key = copy.deepcopy(ctx_keys)
+ log.debug(
+ "LINE %-50s: entering BFD profile sub-context, append to ctx_keys",
+ line
+ )
+ ctx_keys.append(line)
+
else:
# Continuing in an existing context, add non-commented lines to it
current_context_lines.append(line)
diff --git a/tools/frr.service b/tools/frr.service.in
index aa45f420fe..836ce06be7 100644
--- a/tools/frr.service
+++ b/tools/frr.service.in
@@ -17,9 +17,10 @@ WatchdogSec=60s
RestartSec=5
Restart=on-abnormal
LimitNOFILE=1024
-ExecStart=/usr/lib/frr/frrinit.sh start
-ExecStop=/usr/lib/frr/frrinit.sh stop
-ExecReload=/usr/lib/frr/frrinit.sh reload
+PIDFile=@CFG_STATE@/watchfrr.pid
+ExecStart=@CFG_SBIN@/frrinit.sh start
+ExecStop=@CFG_SBIN@/frrinit.sh stop
+ExecReload=@CFG_SBIN@/frrinit.sh reload
[Install]
WantedBy=multi-user.target
diff --git a/tools/frr@.service b/tools/frr@.service.in
index 0fa41c74a3..1e5d252325 100644
--- a/tools/frr@.service
+++ b/tools/frr@.service.in
@@ -17,9 +17,10 @@ WatchdogSec=60s
RestartSec=5
Restart=on-abnormal
LimitNOFILE=1024
-ExecStart=/usr/lib/frr/frrinit.sh start %I
-ExecStop=/usr/lib/frr/frrinit.sh stop %I
-ExecReload=/usr/lib/frr/frrinit.sh reload %I
+PIDFile=@CFG_STATE@/%I/watchfrr.pid
+ExecStart=@CFG_SBIN@/frrinit.sh start %I
+ExecStop=@CFG_SBIN@/frrinit.sh stop %I
+ExecReload=@CFG_SBIN@/frrinit.sh reload %I
[Install]
WantedBy=multi-user.target
diff --git a/vrrpd/vrrp.c b/vrrpd/vrrp.c
index 7728717e99..f4f489c3dd 100644
--- a/vrrpd/vrrp.c
+++ b/vrrpd/vrrp.c
@@ -854,7 +854,7 @@ static int vrrp_recv_advertisement(struct vrrp_router *r, struct ipaddr *src,
vrrp_pkt_adver_dump(dumpbuf, sizeof(dumpbuf), pkt);
DEBUGD(&vrrp_dbg_proto,
VRRP_LOGPFX VRRP_LOGPFX_VRID VRRP_LOGPFX_FAM
- "Received VRRP Advertisement from %s:\n%s",
+ "Received VRRP Advertisement from %s: %s",
r->vr->vrid, family2str(r->family), sipstr, dumpbuf);
/* Check that VRID matches our configured VRID */
diff --git a/vrrpd/vrrp_arp.c b/vrrpd/vrrp_arp.c
index 750050e8c3..749b59cc43 100644
--- a/vrrpd/vrrp_arp.c
+++ b/vrrpd/vrrp_arp.c
@@ -170,7 +170,7 @@ void vrrp_garp_send_all(struct vrrp_router *r)
if (ifp->flags & IFF_NOARP) {
zlog_warn(
VRRP_LOGPFX VRRP_LOGPFX_VRID VRRP_LOGPFX_FAM
- "Unable to send gratuitous ARP on %s; has IFF_NOARP\n",
+ "Unable to send gratuitous ARP on %s; has IFF_NOARP",
r->vr->vrid, family2str(r->family), ifp->name);
return;
}
diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c
index a6f9f39a4c..e026a28628 100644
--- a/vtysh/vtysh.c
+++ b/vtysh/vtysh.c
@@ -3797,11 +3797,11 @@ DEFUN_HIDDEN(show_cli_graph_vtysh,
static void vtysh_install_default(enum node_type node)
{
- install_element(node, &config_list_cmd);
- install_element(node, &find_cmd);
- install_element(node, &show_cli_graph_vtysh_cmd);
- install_element(node, &vtysh_output_file_cmd);
- install_element(node, &no_vtysh_output_file_cmd);
+ _install_element(node, &config_list_cmd);
+ _install_element(node, &find_cmd);
+ _install_element(node, &show_cli_graph_vtysh_cmd);
+ _install_element(node, &vtysh_output_file_cmd);
+ _install_element(node, &no_vtysh_output_file_cmd);
}
/* Making connection to protocol daemon. */
@@ -4001,10 +4001,16 @@ static char *vtysh_completion_entry_function(const char *ignore,
void vtysh_readline_init(void)
{
/* readline related settings. */
+ char *disable_bracketed_paste =
+ XSTRDUP(MTYPE_TMP, "set enable-bracketed-paste off");
+
rl_initialize();
+ rl_parse_and_bind(disable_bracketed_paste);
rl_bind_key('?', (rl_command_func_t *)vtysh_rl_describe);
rl_completion_entry_function = vtysh_completion_entry_function;
rl_attempted_completion_function = new_completion;
+
+ XFREE(MTYPE_TMP, disable_bracketed_paste);
}
char *vtysh_prompt(void)
diff --git a/watchfrr/watchfrr.c b/watchfrr/watchfrr.c
index 319bd6a771..441320b193 100644
--- a/watchfrr/watchfrr.c
+++ b/watchfrr/watchfrr.c
@@ -1080,6 +1080,9 @@ static int valid_command(const char *cmd)
{
char *p;
+ if (cmd == NULL)
+ return 0;
+
return ((p = strchr(cmd, '%')) != NULL) && (*(p + 1) == 's')
&& !strchr(p + 1, '%');
}
@@ -1414,7 +1417,7 @@ int main(int argc, char **argv)
} break;
case OPTION_NETNS:
netns_en = true;
- if (strchr(optarg, '/')) {
+ if (optarg && strchr(optarg, '/')) {
fprintf(stderr,
"invalid network namespace name \"%s\" (may not contain slashes)\n",
optarg);
diff --git a/yang/frr-bgp.yang b/yang/frr-bgp.yang
index 2fb5d13fa7..24998a470d 100644
--- a/yang/frr-bgp.yang
+++ b/yang/frr-bgp.yang
@@ -598,6 +598,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast" {
@@ -626,6 +628,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast" {
@@ -654,7 +658,9 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
- }
+
+ uses structure-neighbor-group-filter-config;
+ }
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast" {
uses structure-neighbor-group-add-paths;
@@ -682,6 +688,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast" {
@@ -710,6 +718,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast" {
@@ -734,6 +744,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast" {
@@ -758,6 +770,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn" {
@@ -772,6 +786,8 @@ module frr-bgp {
uses structure-neighbor-route-server;
uses structure-neighbor-group-soft-reconfiguration;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec" {
@@ -780,6 +796,8 @@ module frr-bgp {
uses structure-neighbor-route-server;
uses structure-neighbor-group-soft-reconfiguration;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec" {
@@ -788,6 +806,8 @@ module frr-bgp {
uses structure-neighbor-route-server;
uses structure-neighbor-group-soft-reconfiguration;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-unicast" {
@@ -855,6 +875,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast" {
@@ -883,6 +905,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast" {
@@ -911,6 +935,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast" {
@@ -939,6 +965,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-labeled-unicast" {
@@ -1037,6 +1065,8 @@ module frr-bgp {
uses structure-neighbor-route-server;
uses structure-neighbor-group-soft-reconfiguration;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec" {
@@ -1045,6 +1075,8 @@ module frr-bgp {
uses structure-neighbor-route-server;
uses structure-neighbor-group-soft-reconfiguration;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-unicast" {
@@ -1112,6 +1144,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast" {
@@ -1140,6 +1174,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast" {
@@ -1168,6 +1204,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast" {
@@ -1196,6 +1234,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast" {
@@ -1224,6 +1264,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast" {
@@ -1248,6 +1290,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast" {
@@ -1272,6 +1316,8 @@ module frr-bgp {
uses structure-neighbor-group-soft-reconfiguration;
uses structure-neighbor-weight;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/l2vpn-evpn" {
@@ -1294,6 +1340,8 @@ module frr-bgp {
uses structure-neighbor-route-server;
uses structure-neighbor-group-soft-reconfiguration;
+
+ uses structure-neighbor-group-filter-config;
}
augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec" {
@@ -1302,5 +1350,7 @@ module frr-bgp {
uses structure-neighbor-route-server;
uses structure-neighbor-group-soft-reconfiguration;
+
+ uses structure-neighbor-group-filter-config;
}
}
diff --git a/yang/frr-isisd.yang b/yang/frr-isisd.yang
index 812dd4159d..8757ab6b8b 100644
--- a/yang/frr-isisd.yang
+++ b/yang/frr-isisd.yang
@@ -1043,9 +1043,24 @@ module frr-isisd {
"Dynamic hostname support for IS-IS.";
}
+ leaf attach-send {
+ type boolean;
+ default "true";
+ description
+        "If true, attached bits are sent in the LSP when this is an L1/L2 router handling inter-area traffic.";
+ }
+
+ leaf attach-receive-ignore {
+ type boolean;
+ default "false";
+ description
+        "If false, attached bits received in an LSP cause a default route to be added when this is an L1 router handling inter-area traffic.";
+ }
+
leaf attached {
type boolean;
default "false";
+ status deprecated;
description
"If true, identify as L1/L2 router for inter-area traffic.";
}
diff --git a/yang/frr-nexthop.yang b/yang/frr-nexthop.yang
index 619514de7d..2df2e2958e 100644
--- a/yang/frr-nexthop.yang
+++ b/yang/frr-nexthop.yang
@@ -61,7 +61,7 @@ module frr-nexthop {
type union {
type inet:ip-address;
type string {
- pattern '';
+ pattern "";
}
}
}
@@ -160,6 +160,7 @@ module frr-nexthop {
description
"The nexthop vrf name, if different from the route.";
}
+
leaf gateway {
type frr-nexthop:optional-ip-address;
description
@@ -173,15 +174,12 @@ module frr-nexthop {
}
leaf bh-type {
- when "../nh-type = 'blackhole'";
type blackhole-type;
description
"A blackhole sub-type, if the nexthop is a blackhole type.";
}
leaf onlink {
- when "../nh-type = 'ip4-ifindex' or
- ../nh-type = 'ip6-ifindex'";
type boolean;
default "false";
description
diff --git a/zebra/debug.c b/zebra/debug.c
index 87a10ea65d..21fa765c63 100644
--- a/zebra/debug.c
+++ b/zebra/debug.c
@@ -41,6 +41,7 @@ unsigned long zebra_debug_dplane;
unsigned long zebra_debug_mlag;
unsigned long zebra_debug_nexthop;
unsigned long zebra_debug_evpn_mh;
+unsigned long zebra_debug_pbr;
DEFINE_HOOK(zebra_debug_show_debugging, (struct vty *vty), (vty));
@@ -122,6 +123,9 @@ DEFUN_NOSH (show_debugging_zebra,
if (IS_ZEBRA_DEBUG_EVPN_MH_NEIGH)
vty_out(vty, " Zebra EVPN-MH Neigh debugging is on\n");
+ if (IS_ZEBRA_DEBUG_PBR)
+ vty_out(vty, " Zebra PBR debugging is on\n");
+
hook_call(zebra_debug_show_debugging, vty);
return CMD_SUCCESS;
}
@@ -318,6 +322,17 @@ DEFUN (debug_zebra_dplane,
return CMD_SUCCESS;
}
+DEFUN (debug_zebra_pbr,
+ debug_zebra_pbr_cmd,
+ "debug zebra pbr",
+ DEBUG_STR
+ "Zebra configuration\n"
+ "Debug zebra pbr events\n")
+{
+ SET_FLAG(zebra_debug_pbr, ZEBRA_DEBUG_PBR);
+ return CMD_SUCCESS;
+}
+
DEFPY (debug_zebra_mlag,
debug_zebra_mlag_cmd,
"[no$no] debug zebra mlag",
@@ -508,6 +523,18 @@ DEFUN (no_debug_zebra_dplane,
return CMD_SUCCESS;
}
+DEFUN (no_debug_zebra_pbr,
+ no_debug_zebra_pbr_cmd,
+ "no debug zebra pbr",
+ NO_STR
+ DEBUG_STR
+ "Zebra configuration\n"
+ "Debug zebra pbr events\n")
+{
+ zebra_debug_pbr = 0;
+ return CMD_SUCCESS;
+}
+
DEFPY (debug_zebra_nexthop,
debug_zebra_nexthop_cmd,
"[no$no] debug zebra nexthop [detail$detail]",
@@ -650,6 +677,11 @@ static int config_write_debug(struct vty *vty)
write++;
}
+ if (IS_ZEBRA_DEBUG_PBR) {
+ vty_out(vty, "debug zebra pbr\n");
+ write++;
+ }
+
return write;
}
@@ -668,6 +700,7 @@ void zebra_debug_init(void)
zebra_debug_evpn_mh = 0;
zebra_debug_nht = 0;
zebra_debug_nexthop = 0;
+ zebra_debug_pbr = 0;
install_node(&debug_node);
@@ -686,6 +719,7 @@ void zebra_debug_init(void)
install_element(ENABLE_NODE, &debug_zebra_dplane_cmd);
install_element(ENABLE_NODE, &debug_zebra_mlag_cmd);
install_element(ENABLE_NODE, &debug_zebra_nexthop_cmd);
+ install_element(ENABLE_NODE, &debug_zebra_pbr_cmd);
install_element(ENABLE_NODE, &no_debug_zebra_events_cmd);
install_element(ENABLE_NODE, &no_debug_zebra_nht_cmd);
install_element(ENABLE_NODE, &no_debug_zebra_mpls_cmd);
@@ -696,6 +730,7 @@ void zebra_debug_init(void)
install_element(ENABLE_NODE, &no_debug_zebra_rib_cmd);
install_element(ENABLE_NODE, &no_debug_zebra_fpm_cmd);
install_element(ENABLE_NODE, &no_debug_zebra_dplane_cmd);
+ install_element(ENABLE_NODE, &no_debug_zebra_pbr_cmd);
install_element(ENABLE_NODE, &debug_zebra_evpn_mh_cmd);
install_element(CONFIG_NODE, &debug_zebra_events_cmd);
@@ -710,6 +745,8 @@ void zebra_debug_init(void)
install_element(CONFIG_NODE, &debug_zebra_fpm_cmd);
install_element(CONFIG_NODE, &debug_zebra_dplane_cmd);
install_element(CONFIG_NODE, &debug_zebra_nexthop_cmd);
+ install_element(CONFIG_NODE, &debug_zebra_pbr_cmd);
+
install_element(CONFIG_NODE, &no_debug_zebra_events_cmd);
install_element(CONFIG_NODE, &no_debug_zebra_nht_cmd);
install_element(CONFIG_NODE, &no_debug_zebra_mpls_cmd);
@@ -720,6 +757,7 @@ void zebra_debug_init(void)
install_element(CONFIG_NODE, &no_debug_zebra_rib_cmd);
install_element(CONFIG_NODE, &no_debug_zebra_fpm_cmd);
install_element(CONFIG_NODE, &no_debug_zebra_dplane_cmd);
+ install_element(CONFIG_NODE, &no_debug_zebra_pbr_cmd);
install_element(CONFIG_NODE, &debug_zebra_mlag_cmd);
install_element(CONFIG_NODE, &debug_zebra_evpn_mh_cmd);
}
diff --git a/zebra/debug.h b/zebra/debug.h
index 8402224f19..86506846ad 100644
--- a/zebra/debug.h
+++ b/zebra/debug.h
@@ -67,6 +67,8 @@ extern "C" {
#define ZEBRA_DEBUG_EVPN_MH_MAC 0x04
#define ZEBRA_DEBUG_EVPN_MH_NEIGH 0x08
+#define ZEBRA_DEBUG_PBR 0x01
+
/* Debug related macro. */
#define IS_ZEBRA_DEBUG_EVENT (zebra_debug_event & ZEBRA_DEBUG_EVENT)
@@ -114,6 +116,8 @@ extern "C" {
#define IS_ZEBRA_DEBUG_EVPN_MH_NEIGH \
(zebra_debug_evpn_mh & ZEBRA_DEBUG_EVPN_MH_NEIGH)
+#define IS_ZEBRA_DEBUG_PBR (zebra_debug_pbr & ZEBRA_DEBUG_PBR)
+
extern unsigned long zebra_debug_event;
extern unsigned long zebra_debug_packet;
extern unsigned long zebra_debug_kernel;
@@ -127,6 +131,7 @@ extern unsigned long zebra_debug_dplane;
extern unsigned long zebra_debug_mlag;
extern unsigned long zebra_debug_nexthop;
extern unsigned long zebra_debug_evpn_mh;
+extern unsigned long zebra_debug_pbr;
extern void zebra_debug_init(void);
diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c
index 51ce59c477..79a5d148a6 100644
--- a/zebra/dplane_fpm_nl.c
+++ b/zebra/dplane_fpm_nl.c
@@ -1052,10 +1052,10 @@ struct fpm_rmac_arg {
bool complete;
};
-static void fpm_enqueue_rmac_table(struct hash_bucket *backet, void *arg)
+static void fpm_enqueue_rmac_table(struct hash_bucket *bucket, void *arg)
{
struct fpm_rmac_arg *fra = arg;
- zebra_mac_t *zrmac = backet->data;
+ zebra_mac_t *zrmac = bucket->data;
struct zebra_if *zif = fra->zl3vni->vxlan_if->info;
const struct zebra_l2info_vxlan *vxl = &zif->l2info.vxl;
struct zebra_if *br_zif;
@@ -1084,10 +1084,10 @@ static void fpm_enqueue_rmac_table(struct hash_bucket *backet, void *arg)
}
}
-static void fpm_enqueue_l3vni_table(struct hash_bucket *backet, void *arg)
+static void fpm_enqueue_l3vni_table(struct hash_bucket *bucket, void *arg)
{
struct fpm_rmac_arg *fra = arg;
- zebra_l3vni_t *zl3vni = backet->data;
+ zebra_l3vni_t *zl3vni = bucket->data;
fra->zl3vni = zl3vni;
hash_iterate(zl3vni->rmac_table, fpm_enqueue_rmac_table, zl3vni);
@@ -1188,16 +1188,16 @@ static int fpm_rib_reset(struct thread *t)
/*
* The next three function will handle RMAC table reset.
*/
-static void fpm_unset_rmac_table(struct hash_bucket *backet, void *arg)
+static void fpm_unset_rmac_table(struct hash_bucket *bucket, void *arg)
{
- zebra_mac_t *zrmac = backet->data;
+ zebra_mac_t *zrmac = bucket->data;
UNSET_FLAG(zrmac->flags, ZEBRA_MAC_FPM_SENT);
}
-static void fpm_unset_l3vni_table(struct hash_bucket *backet, void *arg)
+static void fpm_unset_l3vni_table(struct hash_bucket *bucket, void *arg)
{
- zebra_l3vni_t *zl3vni = backet->data;
+ zebra_l3vni_t *zl3vni = bucket->data;
hash_iterate(zl3vni->rmac_table, fpm_unset_rmac_table, zl3vni);
}
@@ -1236,7 +1236,13 @@ static int fpm_process_queue(struct thread *t)
if (ctx == NULL)
break;
- fpm_nl_enqueue(fnc, ctx);
+ /*
+ * Intentionally ignoring the return value
+ * as that we are ensuring that we can write to
+ * the output data in the STREAM_WRITEABLE
+ * check above, so we can ignore the return
+ */
+ (void)fpm_nl_enqueue(fnc, ctx);
/* Account the processed entries. */
processed_contexts++;
diff --git a/zebra/if_netlink.c b/zebra/if_netlink.c
index e4dd745f42..00471b9645 100644
--- a/zebra/if_netlink.c
+++ b/zebra/if_netlink.c
@@ -301,7 +301,7 @@ static void netlink_vrf_change(struct nlmsghdr *h, struct rtattr *tb,
struct ifinfomsg *ifi;
struct rtattr *linkinfo[IFLA_INFO_MAX + 1];
struct rtattr *attr[IFLA_VRF_MAX + 1];
- struct vrf *vrf;
+ struct vrf *vrf = NULL;
struct zebra_vrf *zvrf;
uint32_t nl_table_id;
@@ -350,11 +350,7 @@ static void netlink_vrf_change(struct nlmsghdr *h, struct rtattr *tb,
}
}
- /*
- * vrf_get is implied creation if it does not exist
- */
- vrf = vrf_get((vrf_id_t)ifi->ifi_index,
- name); // It would create vrf
+ vrf = vrf_update((vrf_id_t)ifi->ifi_index, name);
if (!vrf) {
flog_err(EC_LIB_INTERFACE, "VRF %s id %u not created",
name, ifi->ifi_index);
diff --git a/zebra/interface.c b/zebra/interface.c
index 4072eb1568..fc34a6fb9e 100644
--- a/zebra/interface.c
+++ b/zebra/interface.c
@@ -1029,7 +1029,7 @@ void if_up(struct interface *ifp)
/* Notify the protocol daemons. */
if (ifp->ptm_enable && (ifp->ptm_status == ZEBRA_PTM_STATUS_DOWN)) {
flog_warn(EC_ZEBRA_PTM_NOT_READY,
- "%s: interface %s hasn't passed ptm check\n",
+ "%s: interface %s hasn't passed ptm check",
__func__, ifp->name);
return;
}
@@ -2977,7 +2977,7 @@ int if_ip_address_install(struct interface *ifp, struct prefix *prefix,
dplane_res = dplane_intf_addr_set(ifp, ifc);
if (dplane_res == ZEBRA_DPLANE_REQUEST_FAILURE) {
zlog_debug(
- "dplane can't set interface IP address: %s.\n",
+ "dplane can't set interface IP address: %s.",
dplane_res2str(dplane_res));
return NB_ERR;
}
@@ -3095,7 +3095,7 @@ int if_ip_address_uinstall(struct interface *ifp, struct prefix *prefix)
/* Check current interface address. */
ifc = connected_check_ptp(ifp, prefix, NULL);
if (!ifc) {
- zlog_debug("interface %s Can't find address\n",
+ zlog_debug("interface %s Can't find address",
ifp->name);
return -1;
}
@@ -3106,7 +3106,7 @@ int if_ip_address_uinstall(struct interface *ifp, struct prefix *prefix)
}
if (!ifc) {
- zlog_debug("interface %s Can't find address\n", ifp->name);
+ zlog_debug("interface %s Can't find address", ifp->name);
return -1;
}
UNSET_FLAG(ifc->conf, ZEBRA_IFC_CONFIGURED);
@@ -3122,7 +3122,7 @@ int if_ip_address_uinstall(struct interface *ifp, struct prefix *prefix)
/* This is real route. */
dplane_res = dplane_intf_addr_unset(ifp, ifc);
if (dplane_res == ZEBRA_DPLANE_REQUEST_FAILURE) {
- zlog_debug("Can't unset interface IP address: %s.\n",
+ zlog_debug("Can't unset interface IP address: %s.",
dplane_res2str(dplane_res));
return -1;
}
@@ -3335,7 +3335,7 @@ int if_ipv6_address_install(struct interface *ifp, struct prefix *prefix,
dplane_res = dplane_intf_addr_set(ifp, ifc);
if (dplane_res == ZEBRA_DPLANE_REQUEST_FAILURE) {
zlog_debug(
- "dplane can't set interface IP address: %s.\n",
+ "dplane can't set interface IP address: %s.",
dplane_res2str(dplane_res));
return NB_ERR;
}
diff --git a/zebra/irdp_packet.c b/zebra/irdp_packet.c
index 56fd35a736..6134df9c41 100644
--- a/zebra/irdp_packet.c
+++ b/zebra/irdp_packet.c
@@ -105,7 +105,7 @@ static void parse_irdp_packet(char *p, int len, struct interface *ifp)
if (iplen < ICMP_MINLEN) {
flog_err(EC_ZEBRA_IRDP_LEN_MISMATCH,
- "IRDP: RX ICMP packet too short from %pI4\n",
+ "IRDP: RX ICMP packet too short from %pI4",
&src);
return;
}
@@ -116,7 +116,7 @@ static void parse_irdp_packet(char *p, int len, struct interface *ifp)
len of IP-header) 14+20 */
if (iplen > IRDP_RX_BUF - 34) {
flog_err(EC_ZEBRA_IRDP_LEN_MISMATCH,
- "IRDP: RX ICMP packet too long from %pI4\n",
+ "IRDP: RX ICMP packet too long from %pI4",
&src);
return;
}
@@ -153,7 +153,7 @@ static void parse_irdp_packet(char *p, int len, struct interface *ifp)
&& !(irdp->flags & IF_BROADCAST))) {
flog_warn(
EC_ZEBRA_IRDP_BAD_RX_FLAGS,
- "IRDP: RX illegal from %pI4 to %s while %s operates in %s; Please correct settings\n",
+ "IRDP: RX illegal from %pI4 to %s while %s operates in %s; Please correct settings",
&src,
ntohl(ip->ip_dst.s_addr) == INADDR_ALLRTRS_GROUP
? "multicast"
diff --git a/zebra/kernel_netlink.c b/zebra/kernel_netlink.c
index 5d64f57b3e..c77a357e9f 100644
--- a/zebra/kernel_netlink.c
+++ b/zebra/kernel_netlink.c
@@ -383,7 +383,7 @@ static int netlink_information_fetch(struct nlmsghdr *h, ns_id_t ns_id,
* it to be sent up to us
*/
flog_err(EC_ZEBRA_UNKNOWN_NLMSG,
- "Unknown netlink nlmsg_type %s(%d) vrf %u\n",
+ "Unknown netlink nlmsg_type %s(%d) vrf %u",
nl_msg_type_to_str(h->nlmsg_type), h->nlmsg_type,
ns_id);
break;
@@ -485,10 +485,23 @@ static void netlink_install_filter(int sock, __u32 pid, __u32 dplane_pid)
if (setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog))
< 0)
- flog_err_sys(EC_LIB_SOCKET, "Can't install socket filter: %s\n",
+ flog_err_sys(EC_LIB_SOCKET, "Can't install socket filter: %s",
safe_strerror(errno));
}
+void netlink_parse_rtattr_flags(struct rtattr **tb, int max,
+ struct rtattr *rta, int len, unsigned short flags)
+{
+ unsigned short type;
+
+ while (RTA_OK(rta, len)) {
+ type = rta->rta_type & ~flags;
+ if ((type <= max) && (!tb[type]))
+ tb[type] = rta;
+ rta = RTA_NEXT(rta, len);
+ }
+}
+
void netlink_parse_rtattr(struct rtattr **tb, int max, struct rtattr *rta,
int len)
{
@@ -1123,9 +1136,11 @@ static int nl_batch_read_resp(struct nl_batch *bth)
* associated with any dplane context object.
*/
if (ctx == NULL) {
- zlog_debug(
- "%s: skipping unassociated response, seq number %d NS %u",
- __func__, h->nlmsg_seq, bth->zns->ns_id);
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug(
+ "%s: skipping unassociated response, seq number %d NS %u",
+ __func__, h->nlmsg_seq,
+ bth->zns->ns_id);
continue;
}
@@ -1136,8 +1151,9 @@ static int nl_batch_read_resp(struct nl_batch *bth)
dplane_ctx_set_status(
ctx, ZEBRA_DPLANE_REQUEST_FAILURE);
- zlog_debug("%s: netlink error message seq=%d ",
- __func__, h->nlmsg_seq);
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("%s: netlink error message seq=%d ",
+ __func__, h->nlmsg_seq);
continue;
}
@@ -1146,9 +1162,11 @@ static int nl_batch_read_resp(struct nl_batch *bth)
* the error and instead received some other message in an
* unexpected way.
*/
- zlog_debug("%s: ignoring message type 0x%04x(%s) NS %u",
- __func__, h->nlmsg_type,
- nl_msg_type_to_str(h->nlmsg_type), bth->zns->ns_id);
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("%s: ignoring message type 0x%04x(%s) NS %u",
+ __func__, h->nlmsg_type,
+ nl_msg_type_to_str(h->nlmsg_type),
+ bth->zns->ns_id);
}
return 0;
diff --git a/zebra/kernel_netlink.h b/zebra/kernel_netlink.h
index 696f9be4f6..a7b152b31b 100644
--- a/zebra/kernel_netlink.h
+++ b/zebra/kernel_netlink.h
@@ -79,6 +79,9 @@ extern void nl_attr_rtnh_end(struct nlmsghdr *n, struct rtnexthop *rtnh);
extern void netlink_parse_rtattr(struct rtattr **tb, int max,
struct rtattr *rta, int len);
+extern void netlink_parse_rtattr_flags(struct rtattr **tb, int max,
+ struct rtattr *rta, int len,
+ unsigned short flags);
extern void netlink_parse_rtattr_nested(struct rtattr **tb, int max,
struct rtattr *rta);
extern const char *nl_msg_type_to_str(uint16_t msg_type);
diff --git a/zebra/redistribute.c b/zebra/redistribute.c
index 370dbaa240..b0f124ed55 100644
--- a/zebra/redistribute.c
+++ b/zebra/redistribute.c
@@ -206,7 +206,7 @@ void redistribute_update(const struct prefix *p, const struct prefix *src_p,
afi = family2afi(p->family);
if (!afi) {
flog_warn(EC_ZEBRA_REDISTRIBUTE_UNKNOWN_AF,
- "%s: Unknown AFI/SAFI prefix received\n", __func__);
+ "%s: Unknown AFI/SAFI prefix received", __func__);
return;
}
if (!zebra_check_addr(p)) {
@@ -276,7 +276,7 @@ void redistribute_delete(const struct prefix *p, const struct prefix *src_p,
afi = family2afi(p->family);
if (!afi) {
flog_warn(EC_ZEBRA_REDISTRIBUTE_UNKNOWN_AF,
- "%s: Unknown AFI/SAFI prefix received\n",
+ "%s: Unknown AFI/SAFI prefix received",
__func__);
return;
}
diff --git a/zebra/rib.h b/zebra/rib.h
index d653425f0d..86766b8175 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -406,9 +406,8 @@ extern struct route_entry *rib_lookup_ipv4(struct prefix_ipv4 *p,
vrf_id_t vrf_id);
extern void rib_update(enum rib_update_event event);
-extern void rib_update_vrf(vrf_id_t vrf_id, enum rib_update_event event);
extern void rib_update_table(struct route_table *table,
- enum rib_update_event event);
+ enum rib_update_event event, int rtype);
extern int rib_sweep_route(struct thread *t);
extern void rib_sweep_table(struct route_table *table);
extern void rib_close_table(struct route_table *table);
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index 547700d0c5..1cae0b1f9b 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -77,16 +77,7 @@
/* Re-defining as I am unable to include <linux/if_bridge.h> which has the
* UAPI for MAC sync. */
#ifndef _UAPI_LINUX_IF_BRIDGE_H
-/* FDB notification bits for NDA_NOTIFY:
- * - BR_FDB_NFY_STATIC - notify on activity/expire even for a static entry
- * - BR_FDB_NFY_INACTIVE - mark as inactive to avoid double notification,
- * used with BR_FDB_NFY_STATIC (kernel controlled)
- */
-enum {
- BR_FDB_NFY_STATIC,
- BR_FDB_NFY_INACTIVE,
- BR_FDB_NFY_MAX
-};
+#define BR_SPH_LIST_SIZE 10
#endif
static vlanid_t filter_vlan = 0;
@@ -2490,13 +2481,6 @@ static int netlink_nexthop_process_group(struct rtattr **tb,
return count;
}
-#if 0
- // TODO: Need type for something?
- zlog_debug("Nexthop group type: %d",
- *((uint16_t *)RTA_DATA(tb[NHA_GROUP_TYPE])));
-
-#endif
-
for (int i = 0; ((i < count) && (i < z_grp_size)); i++) {
z_grp[i].id = n_grp[i].id;
z_grp[i].weight = n_grp[i].weight + 1;
@@ -2766,11 +2750,23 @@ static ssize_t netlink_neigh_update_msg_encode(
}
if (nfy) {
- if (!nl_attr_put(&req->n, datalen, NDA_NOTIFY,
- &nfy_flags, sizeof(nfy_flags)))
+ struct rtattr *nest;
+
+ nest = nl_attr_nest(&req->n, datalen,
+ NDA_FDB_EXT_ATTRS | NLA_F_NESTED);
+ if (!nest)
+ return 0;
+
+ if (!nl_attr_put(&req->n, datalen, NFEA_ACTIVITY_NOTIFY,
+ &nfy_flags, sizeof(nfy_flags)))
+ return 0;
+ if (!nl_attr_put(&req->n, datalen, NFEA_DONT_REFRESH, NULL, 0))
return 0;
+
+ nl_attr_nest_end(&req->n, nest);
}
+
if (ext) {
if (!nl_attr_put(&req->n, datalen, NDA_EXT_FLAGS, &ext_flags,
sizeof(ext_flags)))
@@ -2855,7 +2851,8 @@ static int netlink_macfdb_change(struct nlmsghdr *h, int len, ns_id_t ns_id)
* validation of the fields.
*/
memset(tb, 0, sizeof tb);
- netlink_parse_rtattr(tb, NDA_MAX, NDA_RTA(ndm), len);
+ netlink_parse_rtattr_flags(tb, NDA_MAX, NDA_RTA(ndm), len,
+ NLA_F_NESTED);
if (!tb[NDA_LLADDR]) {
if (IS_ZEBRA_DEBUG_KERNEL)
@@ -2897,14 +2894,21 @@ static int netlink_macfdb_change(struct nlmsghdr *h, int len, ns_id_t ns_id)
if (ndm->ndm_state & NUD_STALE)
local_inactive = true;
- if (tb[NDA_NOTIFY]) {
- uint8_t nfy_flags;
+ if (tb[NDA_FDB_EXT_ATTRS]) {
+ struct rtattr *attr = tb[NDA_FDB_EXT_ATTRS];
+ struct rtattr *nfea_tb[NFEA_MAX + 1] = {0};
+
+ netlink_parse_rtattr_nested(nfea_tb, NFEA_MAX, attr);
+ if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) {
+ uint8_t nfy_flags;
- dp_static = true;
- nfy_flags = *(uint8_t *)RTA_DATA(tb[NDA_NOTIFY]);
- /* local activity has not been detected on the entry */
- if (nfy_flags & (1 << BR_FDB_NFY_INACTIVE))
- local_inactive = true;
+ nfy_flags = *(uint8_t *)RTA_DATA(
+ nfea_tb[NFEA_ACTIVITY_NOTIFY]);
+ if (nfy_flags & FDB_NOTIFY_BIT)
+ dp_static = true;
+ if (nfy_flags & FDB_NOTIFY_INACTIVE_BIT)
+ local_inactive = true;
+ }
}
if (IS_ZEBRA_DEBUG_KERNEL)
@@ -3206,12 +3210,12 @@ ssize_t netlink_macfdb_update_ctx(struct zebra_dplane_ctx *ctx, void *data,
} else {
/* local mac */
if (update_flags & DPLANE_MAC_SET_STATIC) {
- nfy_flags |= (1 << BR_FDB_NFY_STATIC);
+ nfy_flags |= FDB_NOTIFY_BIT;
state |= NUD_NOARP;
}
if (update_flags & DPLANE_MAC_SET_INACTIVE)
- nfy_flags |= (1 << BR_FDB_NFY_INACTIVE);
+ nfy_flags |= FDB_NOTIFY_INACTIVE_BIT;
nfy = true;
}
@@ -3339,7 +3343,7 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id)
netlink_handle_5549(ndm, zif, ifp, &ip, false);
if (IS_ZEBRA_DEBUG_KERNEL)
zlog_debug(
- "\tNeighbor Entry Received is a 5549 entry, finished");
+ " Neighbor Entry Received is a 5549 entry, finished");
return 0;
}
@@ -3368,7 +3372,7 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id)
else {
if (IS_ZEBRA_DEBUG_KERNEL)
zlog_debug(
- "\tNeighbor Entry received is not on a VLAN or a BRIDGE, ignoring");
+ " Neighbor Entry received is not on a VLAN or a BRIDGE, ignoring");
return 0;
}
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index 90c6a24e7b..46171df848 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -62,6 +62,8 @@
#include "zebra/zebra_opaque.h"
#include "zebra/zebra_srte.h"
+static int zapi_nhg_decode(struct stream *s, int cmd, struct zapi_nhg *api_nhg);
+
/* Encoding helpers -------------------------------------------------------- */
static void zserv_encode_interface(struct stream *s, struct interface *ifp)
@@ -102,6 +104,7 @@ static void zserv_encode_vrf(struct stream *s, struct zebra_vrf *zvrf)
struct vrf_data data;
const char *netns_name = zvrf_ns_name(zvrf);
+ memset(&data, 0, sizeof(data));
data.l.table_id = zvrf->table_id;
if (netns_name)
@@ -707,13 +710,13 @@ static int zsend_ipv4_nexthop_lookup_mrib(struct zserv *client,
return zserv_send_message(client, s);
}
-static int nhg_notify(uint16_t type, uint16_t instance, uint32_t id,
- enum zapi_nhg_notify_owner note)
+int zsend_nhg_notify(uint16_t type, uint16_t instance, uint32_t session_id,
+ uint32_t id, enum zapi_nhg_notify_owner note)
{
struct zserv *client;
struct stream *s;
- client = zserv_find_client(type, instance);
+ client = zserv_find_client_session(type, instance, session_id);
if (!client) {
if (IS_ZEBRA_DEBUG_PACKET) {
zlog_debug("Not Notifying Owner: %u(%u) about %u(%d)",
@@ -722,6 +725,10 @@ static int nhg_notify(uint16_t type, uint16_t instance, uint32_t id,
return 0;
}
+ if (IS_ZEBRA_DEBUG_SEND)
+ zlog_debug("%s: type %d, id %d, note %d",
+ __func__, type, id, note);
+
s = stream_new(ZEBRA_MAX_PACKET_SIZ);
stream_reset(s);
@@ -1139,7 +1146,7 @@ static void zread_rnh_register(ZAPI_HANDLER_ARGS)
} else {
flog_err(
EC_ZEBRA_UNKNOWN_FAMILY,
- "rnh_register: Received unknown family type %d\n",
+ "rnh_register: Received unknown family type %d",
p.family);
return;
}
@@ -1230,7 +1237,7 @@ static void zread_rnh_unregister(ZAPI_HANDLER_ARGS)
} else {
flog_err(
EC_ZEBRA_UNKNOWN_FAMILY,
- "rnh_register: Received unknown family type %d\n",
+ "rnh_register: Received unknown family type %d",
p.family);
return;
}
@@ -1280,7 +1287,7 @@ static void zread_fec_register(ZAPI_HANDLER_ARGS)
if (p.family != AF_INET && p.family != AF_INET6) {
flog_err(
EC_ZEBRA_UNKNOWN_FAMILY,
- "fec_register: Received unknown family type %d\n",
+ "fec_register: Received unknown family type %d",
p.family);
return;
}
@@ -1346,7 +1353,7 @@ static void zread_fec_unregister(ZAPI_HANDLER_ARGS)
if (p.family != AF_INET && p.family != AF_INET6) {
flog_err(
EC_ZEBRA_UNKNOWN_FAMILY,
- "fec_unregister: Received unknown family type %d\n",
+ "fec_unregister: Received unknown family type %d",
p.family);
return;
}
@@ -1742,7 +1749,7 @@ static bool zapi_read_nexthops(struct zserv *client, struct prefix *p,
return true;
}
-int zapi_nhg_decode(struct stream *s, int cmd, struct zapi_nhg *api_nhg)
+static int zapi_nhg_decode(struct stream *s, int cmd, struct zapi_nhg *api_nhg)
{
uint16_t i;
struct zapi_nexthop *znh;
@@ -1820,16 +1827,17 @@ static void zread_nhg_del(ZAPI_HANDLER_ARGS)
/*
* Delete the received nhg id
*/
-
nhe = zebra_nhg_proto_del(api_nhg.id, api_nhg.proto);
if (nhe) {
zebra_nhg_decrement_ref(nhe);
- nhg_notify(api_nhg.proto, client->instance, api_nhg.id,
- ZAPI_NHG_REMOVED);
+ zsend_nhg_notify(api_nhg.proto, client->instance,
+ client->session_id, api_nhg.id,
+ ZAPI_NHG_REMOVED);
} else
- nhg_notify(api_nhg.proto, client->instance, api_nhg.id,
- ZAPI_NHG_REMOVE_FAIL);
+ zsend_nhg_notify(api_nhg.proto, client->instance,
+ client->session_id, api_nhg.id,
+ ZAPI_NHG_REMOVE_FAIL);
}
static void zread_nhg_add(ZAPI_HANDLER_ARGS)
@@ -1863,7 +1871,8 @@ static void zread_nhg_add(ZAPI_HANDLER_ARGS)
/*
* Create the nhg
*/
- nhe = zebra_nhg_proto_add(api_nhg.id, api_nhg.proto, nhg, 0);
+ nhe = zebra_nhg_proto_add(api_nhg.id, api_nhg.proto, client->instance,
+ client->session_id, nhg, 0);
nexthop_group_delete(&nhg);
zebra_nhg_backup_free(&bnhg);
@@ -1874,12 +1883,12 @@ static void zread_nhg_add(ZAPI_HANDLER_ARGS)
*
* Resolution is going to need some more work.
*/
- if (nhe)
- nhg_notify(api_nhg.proto, client->instance, api_nhg.id,
- ZAPI_NHG_INSTALLED);
- else
- nhg_notify(api_nhg.proto, client->instance, api_nhg.id,
- ZAPI_NHG_FAIL_INSTALL);
+
+ /* If there's a failure, notify sender immediately */
+ if (nhe == NULL)
+ zsend_nhg_notify(api_nhg.proto, client->instance,
+ client->session_id, api_nhg.id,
+ ZAPI_NHG_FAIL_INSTALL);
}
static void zread_route_add(ZAPI_HANDLER_ARGS)
diff --git a/zebra/zapi_msg.h b/zebra/zapi_msg.h
index 9822d72022..c03278669a 100644
--- a/zebra/zapi_msg.h
+++ b/zebra/zapi_msg.h
@@ -108,6 +108,9 @@ extern int zsend_sr_policy_notify_status(uint32_t color,
extern int zsend_client_close_notify(struct zserv *client,
struct zserv *closed_client);
+int zsend_nhg_notify(uint16_t type, uint16_t instance, uint32_t session_id,
+ uint32_t id, enum zapi_nhg_notify_owner note);
+
#ifdef __cplusplus
}
#endif
diff --git a/zebra/zebra_evpn_mac.c b/zebra/zebra_evpn_mac.c
index 6753bf520c..5227a480fc 100644
--- a/zebra/zebra_evpn_mac.c
+++ b/zebra/zebra_evpn_mac.c
@@ -2302,34 +2302,17 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, zebra_evpn_t *zevpn,
return 0;
}
-int zebra_evpn_del_local_mac(zebra_evpn_t *zevpn, struct ethaddr *macaddr,
- struct interface *ifp)
+int zebra_evpn_del_local_mac(zebra_evpn_t *zevpn, zebra_mac_t *mac)
{
- zebra_mac_t *mac;
char buf[ETHER_ADDR_STRLEN];
bool old_bgp_ready;
bool new_bgp_ready;
- /* If entry doesn't exist, nothing to do. */
- mac = zebra_evpn_mac_lookup(zevpn, macaddr);
- if (!mac)
- return 0;
-
- /* Is it a local entry? */
- if (!CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL))
- return 0;
-
- if (IS_ZEBRA_DEBUG_VXLAN) {
- char mac_buf[MAC_BUF_SIZE];
- zlog_debug(
- "DEL MAC %s intf %s(%u) VID %u -> VNI %u seq %u flags %snbr count %u",
- prefix_mac2str(macaddr, buf, sizeof(buf)), ifp->name,
- ifp->ifindex, mac->fwd_info.local.vid, zevpn->vni,
- mac->loc_seq,
- zebra_evpn_zebra_mac_flag_dump(mac, mac_buf,
- sizeof(mac_buf)),
- listcount(mac->neigh_list));
- }
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug("DEL MAC %s VNI %u seq %u flags 0x%x nbr count %u",
+ prefix_mac2str(&mac->macaddr, buf, sizeof(buf)),
+ zevpn->vni, mac->loc_seq, mac->flags,
+ listcount(mac->neigh_list));
old_bgp_ready = zebra_evpn_mac_is_ready_for_bgp(mac->flags);
if (zebra_evpn_mac_is_static(mac)) {
@@ -2344,7 +2327,7 @@ int zebra_evpn_del_local_mac(zebra_evpn_t *zevpn, struct ethaddr *macaddr,
zlog_debug(
"re-add sync-mac vni %u mac %s es %s seq %d f %s",
zevpn->vni,
- prefix_mac2str(macaddr, buf, sizeof(buf)),
+ prefix_mac2str(&mac->macaddr, buf, sizeof(buf)),
mac->es ? mac->es->esi_str : "-", mac->loc_seq,
zebra_evpn_zebra_mac_flag_dump(
mac, mac_buf, sizeof(mac_buf)));
@@ -2371,7 +2354,7 @@ int zebra_evpn_del_local_mac(zebra_evpn_t *zevpn, struct ethaddr *macaddr,
zebra_evpn_process_neigh_on_local_mac_del(zevpn, mac);
/* Remove MAC from BGP. */
- zebra_evpn_mac_send_del_to_client(zevpn->vni, macaddr, mac->flags,
+ zebra_evpn_mac_send_del_to_client(zevpn->vni, &mac->macaddr, mac->flags,
false /* force */);
zebra_evpn_es_mac_deref_entry(mac);
diff --git a/zebra/zebra_evpn_mac.h b/zebra/zebra_evpn_mac.h
index e21b610501..242097907f 100644
--- a/zebra/zebra_evpn_mac.h
+++ b/zebra/zebra_evpn_mac.h
@@ -253,8 +253,7 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, zebra_evpn_t *zevpn,
struct ethaddr *macaddr, vlanid_t vid,
bool sticky, bool local_inactive,
bool dp_static);
-int zebra_evpn_del_local_mac(zebra_evpn_t *zevpn, struct ethaddr *macaddr,
- struct interface *ifp);
+int zebra_evpn_del_local_mac(zebra_evpn_t *zevpn, zebra_mac_t *mac);
int zebra_evpn_mac_gw_macip_add(struct interface *ifp, zebra_evpn_t *zevpn,
struct ipaddr *ip, zebra_mac_t **macp,
struct ethaddr *macaddr, vlanid_t vlan_id);
diff --git a/zebra/zebra_fpm.c b/zebra/zebra_fpm.c
index 18ccbb79fb..73534c4332 100644
--- a/zebra/zebra_fpm.c
+++ b/zebra/zebra_fpm.c
@@ -70,7 +70,7 @@ DEFINE_MTYPE_STATIC(ZEBRA, FPM_MAC_INFO, "FPM_MAC_INFO");
#define ZFPM_STATS_IVL_SECS 10
#define FPM_MAX_MAC_MSG_LEN 512
-static void zfpm_iterate_rmac_table(struct hash_bucket *backet, void *args);
+static void zfpm_iterate_rmac_table(struct hash_bucket *bucket, void *args);
/*
* Structure that holds state for iterating over all route_node
@@ -1635,10 +1635,10 @@ static int zfpm_trigger_rmac_update(zebra_mac_t *rmac, zebra_l3vni_t *zl3vni,
* Iterate over all the RMAC entries for the given L3VNI
* and enqueue the RMAC for FPM processing.
*/
-static void zfpm_trigger_rmac_update_wrapper(struct hash_bucket *backet,
+static void zfpm_trigger_rmac_update_wrapper(struct hash_bucket *bucket,
void *args)
{
- zebra_mac_t *zrmac = (zebra_mac_t *)backet->data;
+ zebra_mac_t *zrmac = (zebra_mac_t *)bucket->data;
zebra_l3vni_t *zl3vni = (zebra_l3vni_t *)args;
zfpm_trigger_rmac_update(zrmac, zl3vni, false, "RMAC added");
@@ -1649,9 +1649,9 @@ static void zfpm_trigger_rmac_update_wrapper(struct hash_bucket *backet,
* This function iterates over all the L3VNIs to trigger
* FPM updates for RMACs currently available.
*/
-static void zfpm_iterate_rmac_table(struct hash_bucket *backet, void *args)
+static void zfpm_iterate_rmac_table(struct hash_bucket *bucket, void *args)
{
- zebra_l3vni_t *zl3vni = (zebra_l3vni_t *)backet->data;
+ zebra_l3vni_t *zl3vni = (zebra_l3vni_t *)bucket->data;
hash_iterate(zl3vni->rmac_table, zfpm_trigger_rmac_update_wrapper,
(void *)zl3vni);
diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c
index 0a692feb35..2864b96c83 100644
--- a/zebra/zebra_nhg.c
+++ b/zebra/zebra_nhg.c
@@ -43,6 +43,7 @@
#include "zebra_errors.h"
#include "zebra_dplane.h"
#include "zebra/interface.h"
+#include "zebra/zapi_msg.h"
DEFINE_MTYPE_STATIC(ZEBRA, NHG, "Nexthop Group Entry");
DEFINE_MTYPE_STATIC(ZEBRA, NHG_CONNECTED, "Nexthop Group Connected");
@@ -1759,6 +1760,10 @@ static bool nexthop_valid_resolve(const struct nexthop *nexthop,
if (!CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_ACTIVE))
return false;
+	/* Must not be a duplicate nexthop */
+ if (CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_DUPLICATE))
+ return false;
+
switch (nexthop->type) {
case NEXTHOP_TYPE_IPV4_IFINDEX:
case NEXTHOP_TYPE_IPV6_IFINDEX:
@@ -2105,10 +2110,10 @@ done_with_match:
/* This function verifies reachability of one given nexthop, which can be
* numbered or unnumbered, IPv4 or IPv6. The result is unconditionally stored
* in nexthop->flags field. The nexthop->ifindex will be updated
- * appropriately as well. An existing route map can turn
- * (otherwise active) nexthop into inactive, but not vice versa.
+ * appropriately as well. An existing route map can turn an
+ * otherwise active nexthop into inactive, but not vice versa.
*
- * If it finds a nexthop recursivedly, set the resolved_id
+ * If it finds a nexthop recursively, set the resolved_id
* to match that nexthop's nhg_hash_entry ID;
*
* The return value is the final value of 'ACTIVE' flag.
@@ -2119,7 +2124,7 @@ static unsigned nexthop_active_check(struct route_node *rn,
{
struct interface *ifp;
route_map_result_t ret = RMAP_PERMITMATCH;
- int family;
+ afi_t family;
const struct prefix *p, *src_p;
struct zebra_vrf *zvrf;
@@ -2131,6 +2136,7 @@ static unsigned nexthop_active_check(struct route_node *rn,
family = AFI_IP6;
else
family = 0;
+
switch (nexthop->type) {
case NEXTHOP_TYPE_IFINDEX:
ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
@@ -2641,6 +2647,7 @@ void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx)
EC_ZEBRA_DP_DELETE_FAIL,
"Failed to uninstall Nexthop ID (%u) from the kernel",
id);
+
/* We already free'd the data, nothing to do */
break;
case DPLANE_OP_NH_INSTALL:
@@ -2661,12 +2668,26 @@ void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx)
SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
zebra_nhg_handle_install(nhe);
- } else
+
+ /* If daemon nhg, send it an update */
+ if (nhe->id >= ZEBRA_NHG_PROTO_LOWER)
+ zsend_nhg_notify(nhe->type, nhe->zapi_instance,
+ nhe->zapi_session, nhe->id,
+ ZAPI_NHG_INSTALLED);
+ } else {
+ /* If daemon nhg, send it an update */
+ if (nhe->id >= ZEBRA_NHG_PROTO_LOWER)
+ zsend_nhg_notify(nhe->type, nhe->zapi_instance,
+ nhe->zapi_session, nhe->id,
+ ZAPI_NHG_FAIL_INSTALL);
+
flog_err(
EC_ZEBRA_DP_INSTALL_FAIL,
"Failed to install Nexthop ID (%u) into the kernel",
nhe->id);
+ }
break;
+
case DPLANE_OP_ROUTE_INSTALL:
case DPLANE_OP_ROUTE_UPDATE:
case DPLANE_OP_ROUTE_DELETE:
@@ -2775,6 +2796,7 @@ bool zebra_nhg_proto_nexthops_only(void)
/* Add NHE from upper level proto */
struct nhg_hash_entry *zebra_nhg_proto_add(uint32_t id, int type,
+ uint16_t instance, uint32_t session,
struct nexthop_group *nhg, afi_t afi)
{
struct nhg_hash_entry lookup;
@@ -2858,6 +2880,10 @@ struct nhg_hash_entry *zebra_nhg_proto_add(uint32_t id, int type,
zebra_nhg_increment_ref(new);
+	/* Capture zapi client info so install/uninstall results can be
+	 * notified back to the owning daemon session */
+ new->zapi_instance = instance;
+ new->zapi_session = session;
+
zebra_nhg_set_valid_if_active(new);
zebra_nhg_install_kernel(new);
diff --git a/zebra/zebra_nhg.h b/zebra/zebra_nhg.h
index 9382b8c65d..db20f2beaf 100644
--- a/zebra/zebra_nhg.h
+++ b/zebra/zebra_nhg.h
@@ -50,8 +50,14 @@ struct nhg_hash_entry {
uint32_t id;
afi_t afi;
vrf_id_t vrf_id;
+
+ /* Source protocol - zebra or another daemon */
int type;
+ /* zapi instance and session id, for groups from other daemons */
+ uint16_t zapi_instance;
+ uint32_t zapi_session;
+
struct nexthop_group nhg;
/* If supported, a mapping of backup nexthops. */
@@ -292,6 +298,7 @@ zebra_nhg_rib_find_nhe(struct nhg_hash_entry *rt_nhe, afi_t rt_afi);
* Returns allocated NHE on success, otherwise NULL.
*/
struct nhg_hash_entry *zebra_nhg_proto_add(uint32_t id, int type,
+ uint16_t instance, uint32_t session,
struct nexthop_group *nhg,
afi_t afi);
diff --git a/zebra/zebra_pbr.c b/zebra/zebra_pbr.c
index c244d2a955..87ab900092 100644
--- a/zebra/zebra_pbr.c
+++ b/zebra/zebra_pbr.c
@@ -32,6 +32,7 @@
#include "zebra/zapi_msg.h"
#include "zebra/zebra_memory.h"
#include "zebra/zserv.h"
+#include "zebra/debug.h"
/* definitions */
DEFINE_MTYPE_STATIC(ZEBRA, PBR_IPTABLE_IFNAME, "PBR interface list")
@@ -499,10 +500,14 @@ void zebra_pbr_add_rule(struct zebra_pbr_rule *rule)
*/
found = pbr_rule_lookup_unique(rule);
- (void)hash_get(zrouter.rules_hash, rule, pbr_rule_alloc_intern);
-
/* If found, this is an update */
if (found) {
+ if (IS_ZEBRA_DEBUG_PBR)
+ zlog_debug(
+ "%s: seq: %d, prior: %d, unique: %d, ifname: %s -- update",
+ __func__, rule->rule.seq, rule->rule.priority,
+ rule->rule.unique, rule->rule.ifname);
+
(void)dplane_pbr_rule_update(found, rule);
if (pbr_rule_release(found))
@@ -510,12 +515,26 @@ void zebra_pbr_add_rule(struct zebra_pbr_rule *rule)
"%s: Rule being updated we know nothing about",
__PRETTY_FUNCTION__);
- } else
+ } else {
+ if (IS_ZEBRA_DEBUG_PBR)
+ zlog_debug(
+ "%s: seq: %d, prior: %d, unique: %d, ifname: %s -- new",
+ __func__, rule->rule.seq, rule->rule.priority,
+ rule->rule.unique, rule->rule.ifname);
+
(void)dplane_pbr_rule_add(rule);
+ }
+
+ (void)hash_get(zrouter.rules_hash, rule, pbr_rule_alloc_intern);
}
void zebra_pbr_del_rule(struct zebra_pbr_rule *rule)
{
+ if (IS_ZEBRA_DEBUG_PBR)
+ zlog_debug("%s: seq: %d, prior: %d, unique: %d, ifname: %s",
+ __func__, rule->rule.seq, rule->rule.priority,
+ rule->rule.unique, rule->rule.ifname);
+
(void)dplane_pbr_rule_delete(rule);
if (pbr_rule_release(rule))
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 7c86735545..c5d977017e 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -745,7 +745,7 @@ void zebra_rib_evaluate_rn_nexthops(struct route_node *rn, uint32_t seq)
if (rnh->seqno == seq) {
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
zlog_debug(
- "\tNode processed and moved already");
+ " Node processed and moved already");
continue;
}
@@ -2323,14 +2323,6 @@ static void process_subq_route(struct listnode *lnode, uint8_t qindex)
UNSET_FLAG(rib_dest_from_rnode(rnode)->flags,
RIB_ROUTE_QUEUED(qindex));
-#if 0
- else
- {
- zlog_debug ("%s: called for route_node (%p, %d) with no ribs",
- __func__, rnode, route_node_get_lock_count(rnode));
- zlog_backtrace(LOG_DEBUG);
- }
-#endif
route_unlock_node(rnode);
}
@@ -3453,7 +3445,8 @@ static void rib_update_route_node(struct route_node *rn, int type)
}
/* Schedule routes of a particular table (address-family) based on event. */
-void rib_update_table(struct route_table *table, enum rib_update_event event)
+void rib_update_table(struct route_table *table, enum rib_update_event event,
+ int rtype)
{
struct route_node *rn;
@@ -3466,12 +3459,12 @@ void rib_update_table(struct route_table *table, enum rib_update_event event)
: NULL;
vrf = zvrf ? zvrf->vrf : NULL;
- zlog_debug("%s: %s VRF %s Table %u event %s", __func__,
+ zlog_debug("%s: %s VRF %s Table %u event %s Route type: %s", __func__,
table->info ? afi2str(
((struct rib_table_info *)table->info)->afi)
: "Unknown",
VRF_LOGNAME(vrf), zvrf ? zvrf->table_id : 0,
- rib_update_event2str(event));
+ rib_update_event2str(event), zebra_route_string(rtype));
}
/* Walk all routes and queue for processing, if appropriate for
@@ -3494,7 +3487,7 @@ void rib_update_table(struct route_table *table, enum rib_update_event event)
break;
case RIB_UPDATE_RMAP_CHANGE:
case RIB_UPDATE_OTHER:
- rib_update_route_node(rn, ZEBRA_ROUTE_ALL);
+ rib_update_route_node(rn, rtype);
break;
default:
break;
@@ -3502,7 +3495,8 @@ void rib_update_table(struct route_table *table, enum rib_update_event event)
}
}
-static void rib_update_handle_vrf(vrf_id_t vrf_id, enum rib_update_event event)
+static void rib_update_handle_vrf(vrf_id_t vrf_id, enum rib_update_event event,
+ int rtype)
{
struct route_table *table;
@@ -3513,14 +3507,14 @@ static void rib_update_handle_vrf(vrf_id_t vrf_id, enum rib_update_event event)
/* Process routes of interested address-families. */
table = zebra_vrf_table(AFI_IP, SAFI_UNICAST, vrf_id);
if (table)
- rib_update_table(table, event);
+ rib_update_table(table, event, rtype);
table = zebra_vrf_table(AFI_IP6, SAFI_UNICAST, vrf_id);
if (table)
- rib_update_table(table, event);
+ rib_update_table(table, event, rtype);
}
-static void rib_update_handle_vrf_all(enum rib_update_event event)
+static void rib_update_handle_vrf_all(enum rib_update_event event, int rtype)
{
struct zebra_router_table *zrt;
@@ -3530,7 +3524,7 @@ static void rib_update_handle_vrf_all(enum rib_update_event event)
/* Just iterate over all the route tables, rather than vrf lookups */
RB_FOREACH (zrt, zebra_router_table_head, &zrouter.tables)
- rib_update_table(zrt->table, event);
+ rib_update_table(zrt->table, event, rtype);
}
struct rib_update_ctx {
@@ -3564,9 +3558,9 @@ static int rib_update_handler(struct thread *thread)
ctx = THREAD_ARG(thread);
if (ctx->vrf_all)
- rib_update_handle_vrf_all(ctx->event);
+ rib_update_handle_vrf_all(ctx->event, ZEBRA_ROUTE_ALL);
else
- rib_update_handle_vrf(ctx->vrf_id, ctx->event);
+ rib_update_handle_vrf(ctx->vrf_id, ctx->event, ZEBRA_ROUTE_ALL);
rib_update_ctx_fini(&ctx);
@@ -3579,26 +3573,6 @@ static int rib_update_handler(struct thread *thread)
*/
static struct thread *t_rib_update_threads[RIB_UPDATE_MAX];
-/* Schedule a RIB update event for specific vrf */
-void rib_update_vrf(vrf_id_t vrf_id, enum rib_update_event event)
-{
- struct rib_update_ctx *ctx;
-
- ctx = rib_update_ctx_init(vrf_id, event);
-
- /* Don't worry about making sure multiple rib updates for specific vrf
- * are scheduled at once for now. If it becomes a problem, we can use a
- * lookup of some sort to keep track of running threads via t_vrf_id
- * like how we are doing it in t_rib_update_threads[].
- */
- thread_add_event(zrouter.master, rib_update_handler, ctx, 0, NULL);
-
- if (IS_ZEBRA_DEBUG_EVENT)
- zlog_debug("%s: Scheduled VRF %s, event %s", __func__,
- vrf_id_to_name(ctx->vrf_id),
- rib_update_event2str(event));
-}
-
/* Schedule a RIB update event for all vrfs */
void rib_update(enum rib_update_event event)
{
diff --git a/zebra/zebra_rnh.c b/zebra/zebra_rnh.c
index 3c4dbc5e9c..48e2bafe44 100644
--- a/zebra/zebra_rnh.c
+++ b/zebra/zebra_rnh.c
@@ -1124,7 +1124,7 @@ int zebra_send_rnh_update(struct rnh *rnh, struct zserv *client,
break;
default:
flog_err(EC_ZEBRA_RNH_UNKNOWN_FAMILY,
- "%s: Unknown family (%d) notification attempted\n",
+ "%s: Unknown family (%d) notification attempted",
__func__, rn->p.family);
goto failure;
}
diff --git a/zebra/zebra_routemap.c b/zebra/zebra_routemap.c
index bbc8b6f19d..17a9bf97f9 100644
--- a/zebra/zebra_routemap.c
+++ b/zebra/zebra_routemap.c
@@ -267,7 +267,8 @@ static int ip_protocol_rm_add(struct zebra_vrf *zvrf, const char *rmap,
/* Process routes of interested address-families. */
table = zebra_vrf_table(afi, safi, zvrf->vrf->vrf_id);
if (table)
- rib_update_table(table, RIB_UPDATE_RMAP_CHANGE);
+ rib_update_table(table, RIB_UPDATE_RMAP_CHANGE,
+ rtype);
}
return CMD_SUCCESS;
@@ -294,7 +295,8 @@ static int ip_protocol_rm_del(struct zebra_vrf *zvrf, const char *rmap,
/* Process routes of interested address-families. */
table = zebra_vrf_table(afi, safi, zvrf->vrf->vrf_id);
if (table)
- rib_update_table(table, RIB_UPDATE_RMAP_CHANGE);
+ rib_update_table(table, RIB_UPDATE_RMAP_CHANGE,
+ rtype);
}
XFREE(MTYPE_ROUTE_MAP_NAME, PROTO_RM_NAME(zvrf, afi, rtype));
}
@@ -576,7 +578,7 @@ DEFUN (zebra_route_map_timer,
ZEBRA_STR
"Set route-map parameters\n"
"Time to wait before route-map updates are processed\n"
- "0 means event-driven updates are disabled\n")
+ "0 means route-map changes are run immediately instead of delaying\n")
{
int idx_number = 3;
uint32_t rmap_delay_timer;
@@ -594,7 +596,7 @@ DEFUN (no_zebra_route_map_timer,
ZEBRA_STR
"Set route-map parameters\n"
"Reset delay-timer to default value, 30 secs\n"
- "0 means event-driven updates are disabled\n")
+ "0 means route-map changes are run immediately instead of delaying\n")
{
zebra_route_map_set_delay_timer(ZEBRA_RMAP_DEFAULT_UPDATE_TIMER);
@@ -1454,8 +1456,6 @@ static void zebra_rib_table_rm_update(const char *rmap)
struct vrf *vrf = NULL;
struct zebra_vrf *zvrf = NULL;
char *rmap_name;
- char afi_ip = 0;
- char afi_ipv6 = 0;
struct route_map *old = NULL;
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
@@ -1486,16 +1486,12 @@ static void zebra_rib_table_rm_update(const char *rmap)
PROTO_RM_MAP(zvrf, AFI_IP, i));
/* There is single rib table for all protocols
*/
- if (afi_ip == 0) {
- table = zvrf->table[AFI_IP]
- [SAFI_UNICAST];
- if (table) {
-
- afi_ip = 1;
- rib_update_table(
- table,
- RIB_UPDATE_RMAP_CHANGE);
- }
+ table = zvrf->table[AFI_IP][SAFI_UNICAST];
+ if (table) {
+ rib_update_table(
+ table,
+ RIB_UPDATE_RMAP_CHANGE,
+ i);
}
}
rmap_name = PROTO_RM_NAME(zvrf, AFI_IP6, i);
@@ -1515,16 +1511,12 @@ static void zebra_rib_table_rm_update(const char *rmap)
PROTO_RM_MAP(zvrf, AFI_IP6, i));
/* There is single rib table for all protocols
*/
- if (afi_ipv6 == 0) {
- table = zvrf->table[AFI_IP6]
- [SAFI_UNICAST];
- if (table) {
-
- afi_ipv6 = 1;
- rib_update_table(
- table,
- RIB_UPDATE_RMAP_CHANGE);
- }
+ table = zvrf->table[AFI_IP6][SAFI_UNICAST];
+ if (table) {
+ rib_update_table(
+ table,
+ RIB_UPDATE_RMAP_CHANGE,
+ i);
}
}
}
@@ -1628,8 +1620,6 @@ static void zebra_route_map_process_update_cb(char *rmap_name)
static int zebra_route_map_update_timer(struct thread *thread)
{
- zebra_t_rmap_update = NULL;
-
if (IS_ZEBRA_DEBUG_EVENT)
zlog_debug("Event driven route-map update triggered");
@@ -1654,8 +1644,8 @@ static void zebra_route_map_set_delay_timer(uint32_t value)
if (!value && zebra_t_rmap_update) {
/* Event driven route map updates is being disabled */
/* But there's a pending timer. Fire it off now */
- thread_cancel(&zebra_t_rmap_update);
- zebra_route_map_update_timer(zebra_t_rmap_update);
+ THREAD_OFF(zebra_t_rmap_update);
+ zebra_route_map_update_timer(NULL);
}
}
@@ -1664,12 +1654,12 @@ void zebra_routemap_finish(void)
/* Set zebra_rmap_update_timer to 0 so that it wont schedule again */
zebra_rmap_update_timer = 0;
/* Thread off if any scheduled already */
- thread_cancel(&zebra_t_rmap_update);
+ THREAD_OFF(zebra_t_rmap_update);
route_map_finish();
}
route_map_result_t
-zebra_route_map_check(int family, int rib_type, uint8_t instance,
+zebra_route_map_check(afi_t family, int rib_type, uint8_t instance,
const struct prefix *p, struct nexthop *nexthop,
struct zebra_vrf *zvrf, route_tag_t tag)
{
@@ -1780,12 +1770,11 @@ route_map_result_t zebra_nht_route_map_check(afi_t afi, int client_proto,
static void zebra_route_map_mark_update(const char *rmap_name)
{
/* rmap_update_timer of 0 means don't do route updates */
- if (zebra_rmap_update_timer && !zebra_t_rmap_update) {
- zebra_t_rmap_update = NULL;
- thread_add_timer(zrouter.master, zebra_route_map_update_timer,
- NULL, zebra_rmap_update_timer,
- &zebra_t_rmap_update);
- }
+ if (zebra_rmap_update_timer)
+ THREAD_OFF(zebra_t_rmap_update);
+
+ thread_add_timer(zrouter.master, zebra_route_map_update_timer,
+ NULL, zebra_rmap_update_timer, &zebra_t_rmap_update);
}
static void zebra_route_map_add(const char *rmap_name)
diff --git a/zebra/zebra_routemap.h b/zebra/zebra_routemap.h
index 251e07af72..c016d95875 100644
--- a/zebra/zebra_routemap.h
+++ b/zebra/zebra_routemap.h
@@ -42,7 +42,7 @@ zebra_import_table_route_map_check(int family, int rib_type, uint8_t instance,
struct nexthop *nexthop, vrf_id_t vrf_id,
route_tag_t tag, const char *rmap_name);
extern route_map_result_t
-zebra_route_map_check(int family, int rib_type, uint8_t instance,
+zebra_route_map_check(afi_t family, int rib_type, uint8_t instance,
const struct prefix *p, struct nexthop *nexthop,
struct zebra_vrf *zvrf, route_tag_t tag);
extern route_map_result_t
diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c
index 697a6eecf1..424c00d5eb 100644
--- a/zebra/zebra_vxlan.c
+++ b/zebra/zebra_vxlan.c
@@ -3333,7 +3333,7 @@ int zebra_vxlan_clear_dup_detect_vni(struct zebra_vrf *zvrf, vni_t vni)
zevpn = zebra_evpn_lookup(vni);
if (!zevpn) {
- zlog_warn("VNI %u does not exist\n", vni);
+ zlog_warn("VNI %u does not exist", vni);
return CMD_WARNING;
}
@@ -4071,7 +4071,6 @@ int zebra_vxlan_dp_network_mac_add(struct interface *ifp,
* 1. readd the remote MAC if we have it
* 2. local MAC with does ES may also need to be re-installed
*/
-static int zebra_vxlan_do_local_mac_del(zebra_evpn_t *zevpn, zebra_mac_t *mac);
int zebra_vxlan_dp_network_mac_del(struct interface *ifp,
struct interface *br_if,
struct ethaddr *macaddr, vlanid_t vid)
@@ -4122,72 +4121,7 @@ int zebra_vxlan_dp_network_mac_del(struct interface *ifp,
if (IS_ZEBRA_DEBUG_VXLAN || IS_ZEBRA_DEBUG_EVPN_MH_MAC)
zlog_debug("dpDel local-nw-MAC %pEA VNI %u", macaddr,
vni);
- zebra_vxlan_do_local_mac_del(zevpn, mac);
- }
-
- return 0;
-}
-
-static int zebra_vxlan_do_local_mac_del(zebra_evpn_t *zevpn, zebra_mac_t *mac)
-{
- bool old_bgp_ready;
- bool new_bgp_ready;
-
- if (IS_ZEBRA_DEBUG_VXLAN)
- zlog_debug("DEL MAC %pEA VNI %u seq %u flags 0x%x nbr count %u",
- &mac->macaddr, zevpn->vni, mac->loc_seq, mac->flags,
- listcount(mac->neigh_list));
-
- old_bgp_ready = zebra_evpn_mac_is_ready_for_bgp(mac->flags);
- if (zebra_evpn_mac_is_static(mac)) {
- /* this is a synced entry and can only be removed when the
- * es-peers stop advertising it.
- */
- memset(&mac->fwd_info, 0, sizeof(mac->fwd_info));
-
- if (IS_ZEBRA_DEBUG_EVPN_MH_MAC)
- zlog_debug(
- "re-add sync-mac vni %u mac %pEA es %s seq %d f 0x%x",
- zevpn->vni, &mac->macaddr,
- mac->es ? mac->es->esi_str : "-", mac->loc_seq,
- mac->flags);
-
- /* inform-bgp about change in local-activity if any */
- if (!CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL_INACTIVE)) {
- SET_FLAG(mac->flags, ZEBRA_MAC_LOCAL_INACTIVE);
- new_bgp_ready =
- zebra_evpn_mac_is_ready_for_bgp(mac->flags);
- zebra_evpn_mac_send_add_del_to_client(
- mac, old_bgp_ready, new_bgp_ready);
- }
-
- /* re-install the entry in the kernel */
- zebra_evpn_sync_mac_dp_install(mac, false /* set_inactive */,
- false /* force_clear_static */,
- __func__);
-
- return 0;
- }
-
- /* Update all the neigh entries associated with this mac */
- zebra_evpn_process_neigh_on_local_mac_del(zevpn, mac);
-
- /* Remove MAC from BGP. */
- zebra_evpn_mac_send_del_to_client(zevpn->vni, &mac->macaddr, mac->flags,
- false /* force */);
-
- zebra_evpn_es_mac_deref_entry(mac);
-
- /*
- * If there are no neigh associated with the mac delete the mac
- * else mark it as AUTO for forward reference
- */
- if (!listcount(mac->neigh_list)) {
- zebra_evpn_mac_del(zevpn, mac);
- } else {
- UNSET_FLAG(mac->flags, ZEBRA_MAC_ALL_LOCAL_FLAGS);
- UNSET_FLAG(mac->flags, ZEBRA_MAC_STICKY);
- SET_FLAG(mac->flags, ZEBRA_MAC_AUTO);
+ return zebra_evpn_del_local_mac(zevpn, mac);
}
return 0;
@@ -4224,7 +4158,7 @@ int zebra_vxlan_local_mac_del(struct interface *ifp, struct interface *br_if,
if (!CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL))
return 0;
- return zebra_vxlan_do_local_mac_del(zevpn, mac);
+ return zebra_evpn_del_local_mac(zevpn, mac);
}
/*
@@ -6115,9 +6049,9 @@ static void zebra_vxlan_sg_ref(struct in_addr local_vtep_ip,
zebra_vxlan_sg_do_ref(zvrf, local_vtep_ip, mcast_grp);
}
-static void zebra_vxlan_xg_pre_cleanup(struct hash_bucket *backet, void *arg)
+static void zebra_vxlan_xg_pre_cleanup(struct hash_bucket *bucket, void *arg)
{
- zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)backet->data;
+ zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)bucket->data;
/* increment the ref count against (*,G) to prevent them from being
* deleted
@@ -6126,9 +6060,9 @@ static void zebra_vxlan_xg_pre_cleanup(struct hash_bucket *backet, void *arg)
++vxlan_sg->ref_cnt;
}
-static void zebra_vxlan_xg_post_cleanup(struct hash_bucket *backet, void *arg)
+static void zebra_vxlan_xg_post_cleanup(struct hash_bucket *bucket, void *arg)
{
- zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)backet->data;
+ zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)bucket->data;
/* decrement the dummy ref count against (*,G) to delete them */
if (vxlan_sg->sg.src.s_addr == INADDR_ANY) {
@@ -6139,9 +6073,9 @@ static void zebra_vxlan_xg_post_cleanup(struct hash_bucket *backet, void *arg)
}
}
-static void zebra_vxlan_sg_cleanup(struct hash_bucket *backet, void *arg)
+static void zebra_vxlan_sg_cleanup(struct hash_bucket *bucket, void *arg)
{
- zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)backet->data;
+ zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)bucket->data;
zebra_vxlan_sg_del(vxlan_sg);
}
@@ -6159,9 +6093,9 @@ static void zebra_vxlan_cleanup_sg_table(struct zebra_vrf *zvrf)
hash_iterate(zvrf->vxlan_sg_table, zebra_vxlan_xg_post_cleanup, NULL);
}
-static void zebra_vxlan_sg_replay_send(struct hash_bucket *backet, void *arg)
+static void zebra_vxlan_sg_replay_send(struct hash_bucket *bucket, void *arg)
{
- zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)backet->data;
+ zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)bucket->data;
zebra_vxlan_sg_send(vxlan_sg->zvrf, &vxlan_sg->sg,
vxlan_sg->sg_str, ZEBRA_VXLAN_SG_ADD);
diff --git a/zebra/zserv.c b/zebra/zserv.c
index 484d94fac8..6c5eebe6fe 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -55,6 +55,7 @@
#include "lib/frr_pthread.h" /* for frr_pthread_new, frr_pthread_stop... */
#include "lib/frratomic.h" /* for atomic_load_explicit, atomic_stor... */
#include "lib/lib_errors.h" /* for generic ferr ids */
+#include "lib/printfrr.h" /* for string functions */
#include "zebra/debug.h" /* for various debugging macros */
#include "zebra/rib.h" /* for rib_score_proto */
@@ -1186,6 +1187,7 @@ static void zebra_show_stale_client_detail(struct vty *vty,
static void zebra_show_client_brief(struct vty *vty, struct zserv *client)
{
+ char client_string[80];
char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
char wbuf[ZEBRA_TIME_BUF];
time_t connect_time, last_read_time, last_write_time;
@@ -1197,8 +1199,15 @@ static void zebra_show_client_brief(struct vty *vty, struct zserv *client)
last_write_time = (time_t)atomic_load_explicit(&client->last_write_time,
memory_order_relaxed);
- vty_out(vty, "%-10s%12s %12s%12s%8d/%-8d%8d/%-8d\n",
- zebra_route_string(client->proto),
+ if (client->instance || client->session_id)
+ snprintfrr(client_string, sizeof(client_string), "%s[%u:%u]",
+ zebra_route_string(client->proto), client->instance,
+ client->session_id);
+ else
+ snprintfrr(client_string, sizeof(client_string), "%s",
+ zebra_route_string(client->proto));
+
+ vty_out(vty, "%-10s%12s %12s%12s%8d/%-8d%8d/%-8d\n", client_string,
zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF),
zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF),
zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF),