-rw-r--r--  bfdd/bfd.c | 3
-rw-r--r--  bgpd/bgp_bmp.c | 9
-rw-r--r--  bgpd/bgp_packet.c | 3
-rw-r--r--  bgpd/bgp_pbr.c | 92
-rw-r--r--  bgpd/bgp_rpki.c | 5
-rw-r--r--  bgpd/bgpd.c | 2
-rw-r--r--  doc/user/static.rst | 4
-rw-r--r--  isisd/isis_cli.c | 34
-rw-r--r--  isisd/isis_zebra.c | 1
-rw-r--r--  lib/command_parse.y | 8
-rw-r--r--  lib/northbound.c | 24
-rw-r--r--  lib/northbound.h | 21
-rw-r--r--  lib/northbound_oper.c | 83
-rw-r--r--  lib/resolver.c | 2
-rw-r--r--  pathd/path_cli.c | 42
-rw-r--r--  pimd/pim6_cmd.c | 16
-rw-r--r--  pimd/pim_autorp.c | 133
-rw-r--r--  pimd/pim_bsm.c | 5
-rw-r--r--  pimd/pim_bsr_rpdb.c | 19
-rw-r--r--  pimd/pim_cmd.c | 112
-rw-r--r--  pimd/pim_iface.c | 4
-rw-r--r--  pimd/pim_nb_config.c | 5
-rw-r--r--  staticd/static_vty.c | 68
-rw-r--r--  staticd/static_zebra.c | 14
-rw-r--r--  tests/.gitignore | 1
-rw-r--r--  tests/lib/northbound/test_oper_data.c | 3
-rw-r--r--  tests/topotests/bgp_rpki_topo1/test_bgp_rpki_topo1.py | 115
-rw-r--r--  tests/topotests/lib/common_config.py | 66
-rw-r--r--  tests/topotests/static_simple/test_static_simple.py | 20
-rw-r--r--  yang/frr-pathd.yang | 96
-rw-r--r--  zebra/dplane_fpm_nl.c | 29
-rw-r--r--  zebra/fpm_listener.c | 141
-rw-r--r--  zebra/rib.h | 7
-rw-r--r--  zebra/zebra_dplane.c | 8
-rw-r--r--  zebra/zebra_dplane.h | 3
-rw-r--r--  zebra/zebra_rib.c | 134
-rw-r--r--  zebra/zebra_vty.c | 15
37 files changed, 1023 insertions, 324 deletions
diff --git a/bfdd/bfd.c b/bfdd/bfd.c
index 3cee2565f3..f199970e20 100644
--- a/bfdd/bfd.c
+++ b/bfdd/bfd.c
@@ -1545,6 +1545,7 @@ void bfd_set_shutdown(struct bfd_session *bs, bool shutdown)
return;
SET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN);
+ bs->local_diag = BD_ADMIN_DOWN;
/* Handle data plane shutdown case. */
if (bs->bdc) {
@@ -2517,7 +2518,7 @@ void sbfd_reflector_free(const uint32_t discr)
return;
}
-void sbfd_reflector_flush()
+void sbfd_reflector_flush(void)
{
sbfd_discr_iterate(_sbfd_reflector_free, NULL);
return;
diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c
index b1bff82b05..e09e7b4941 100644
--- a/bgpd/bgp_bmp.c
+++ b/bgpd/bgp_bmp.c
@@ -3542,7 +3542,6 @@ static int bmp_bgp_attribute_updated(struct bgp *bgp, bool withdraw)
struct bmp_targets *bt;
struct listnode *node;
struct bmp_imported_bgp *bib;
- int ret = 0;
struct stream *s = bmp_peerstate(bgp->peer_self, withdraw);
struct bmp *bmp;
afi_t afi;
@@ -3553,8 +3552,8 @@ static int bmp_bgp_attribute_updated(struct bgp *bgp, bool withdraw)
if (bmpbgp) {
frr_each (bmp_targets, &bmpbgp->targets, bt) {
- ret = bmp_bgp_attribute_updated_instance(bt, &bmpbgp->vrf_state, bgp,
- withdraw, s);
+ bmp_bgp_attribute_updated_instance(bt, &bmpbgp->vrf_state, bgp,
+ withdraw, s);
if (withdraw)
continue;
frr_each (bmp_session, &bt->sessions, bmp) {
@@ -3575,8 +3574,8 @@ static int bmp_bgp_attribute_updated(struct bgp *bgp, bool withdraw)
frr_each (bmp_imported_bgps, &bt->imported_bgps, bib) {
if (bgp_lookup_by_name(bib->name) != bgp)
continue;
- ret += bmp_bgp_attribute_updated_instance(bt, &bib->vrf_state, bgp,
- withdraw, s);
+ bmp_bgp_attribute_updated_instance(bt, &bib->vrf_state, bgp,
+ withdraw, s);
if (withdraw)
continue;
frr_each (bmp_session, &bt->sessions, bmp) {
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index b18ebf411c..16e94d9fe2 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -3995,7 +3995,7 @@ void bgp_process_packet(struct event *thread)
uint32_t processed = 0, curr_connection_processed = 0;
bool more_work = false;
size_t count;
- uint32_t total_packets_to_process, total_processed = 0;
+ uint32_t total_packets_to_process;
frr_with_mutex (&bm->peer_connection_mtx)
connection = peer_connection_fifo_pop(&bm->connection_fifo);
@@ -4011,7 +4011,6 @@ void bgp_process_packet(struct event *thread)
fsm_update_result = 0;
while ((processed < total_packets_to_process) && connection) {
- total_processed++;
/* Guard against scheduled events that occur after peer deletion. */
if (connection->status == Deleted || connection->status == Clearing) {
frr_with_mutex (&bm->peer_connection_mtx)
diff --git a/bgpd/bgp_pbr.c b/bgpd/bgp_pbr.c
index be4c4741fd..2b13715da3 100644
--- a/bgpd/bgp_pbr.c
+++ b/bgpd/bgp_pbr.c
@@ -279,6 +279,13 @@ static void bgp_pbr_policyroute_add_to_zebra_unit(struct bgp *bgp,
static void bgp_pbr_dump_entry(struct bgp_pbr_filter *bpf, bool add);
+static void bgp_pbr_val_mask_free(void *arg)
+{
+ struct bgp_pbr_val_mask *pbr_val_mask = arg;
+
+ XFREE(MTYPE_PBR_VALMASK, pbr_val_mask);
+}
+
static bool bgp_pbr_extract_enumerate_unary_opposite(
uint8_t unary_operator,
struct bgp_pbr_val_mask *and_valmask,
@@ -965,7 +972,12 @@ int bgp_pbr_build_and_validate_entry(const struct prefix *p,
return 0;
}
-static void bgp_pbr_match_entry_free(void *arg)
+static void bgp_pbr_match_entry_free(struct bgp_pbr_match_entry *bpme)
+{
+ XFREE(MTYPE_PBR_MATCH_ENTRY, bpme);
+}
+
+static void bgp_pbr_match_entry_hash_free(void *arg)
{
struct bgp_pbr_match_entry *bpme;
@@ -976,16 +988,21 @@ static void bgp_pbr_match_entry_free(void *arg)
bpme->installed = false;
bpme->backpointer = NULL;
}
- XFREE(MTYPE_PBR_MATCH_ENTRY, bpme);
+ bgp_pbr_match_entry_free(bpme);
+}
+
+static void bgp_pbr_match_free(struct bgp_pbr_match *bpm)
+{
+ XFREE(MTYPE_PBR_MATCH, bpm);
}
-static void bgp_pbr_match_free(void *arg)
+static void bgp_pbr_match_hash_free(void *arg)
{
struct bgp_pbr_match *bpm;
bpm = (struct bgp_pbr_match *)arg;
- hash_clean(bpm->entry_hash, bgp_pbr_match_entry_free);
+ hash_clean(bpm->entry_hash, bgp_pbr_match_entry_hash_free);
if (hashcount(bpm->entry_hash) == 0) {
/* delete iptable entry first */
@@ -1004,7 +1021,7 @@ static void bgp_pbr_match_free(void *arg)
}
hash_clean_and_free(&bpm->entry_hash, NULL);
- XFREE(MTYPE_PBR_MATCH, bpm);
+ bgp_pbr_match_free(bpm);
}
static void *bgp_pbr_match_alloc_intern(void *arg)
@@ -1019,7 +1036,12 @@ static void *bgp_pbr_match_alloc_intern(void *arg)
return new;
}
-static void bgp_pbr_rule_free(void *arg)
+static void bgp_pbr_rule_free(struct bgp_pbr_rule *pbr)
+{
+ XFREE(MTYPE_PBR_RULE, pbr);
+}
+
+static void bgp_pbr_rule_hash_free(void *arg)
{
struct bgp_pbr_rule *bpr;
@@ -1032,7 +1054,7 @@ static void bgp_pbr_rule_free(void *arg)
bpr->action->refcnt--;
bpr->action = NULL;
}
- XFREE(MTYPE_PBR_RULE, bpr);
+ bgp_pbr_rule_free(bpr);
}
static void *bgp_pbr_rule_alloc_intern(void *arg)
@@ -1372,8 +1394,8 @@ struct bgp_pbr_match *bgp_pbr_match_iptable_lookup(vrf_id_t vrf_id,
void bgp_pbr_cleanup(struct bgp *bgp)
{
- hash_clean_and_free(&bgp->pbr_match_hash, bgp_pbr_match_free);
- hash_clean_and_free(&bgp->pbr_rule_hash, bgp_pbr_rule_free);
+ hash_clean_and_free(&bgp->pbr_match_hash, bgp_pbr_match_hash_free);
+ hash_clean_and_free(&bgp->pbr_rule_hash, bgp_pbr_rule_hash_free);
hash_clean_and_free(&bgp->pbr_action_hash, bgp_pbr_action_free);
if (bgp->bgp_pbr_cfg == NULL)
@@ -1656,6 +1678,8 @@ static void bgp_pbr_flush_iprule(struct bgp *bgp, struct bgp_pbr_action *bpa,
}
}
hash_release(bgp->pbr_rule_hash, bpr);
+ bgp_pbr_rule_free(bpr);
+
bgp_pbr_bpa_remove(bpa);
}
@@ -1685,6 +1709,7 @@ static void bgp_pbr_flush_entry(struct bgp *bgp, struct bgp_pbr_action *bpa,
}
}
hash_release(bpm->entry_hash, bpme);
+ bgp_pbr_match_entry_free(bpme);
if (hashcount(bpm->entry_hash) == 0) {
/* delete iptable entry first */
/* then delete ipset match */
@@ -1700,6 +1725,7 @@ static void bgp_pbr_flush_entry(struct bgp *bgp, struct bgp_pbr_action *bpa,
bpm->action = NULL;
}
hash_release(bgp->pbr_match_hash, bpm);
+ bgp_pbr_match_free(bpm);
/* XXX release pbr_match_action if not used
* note that drop does not need to call send_pbr_action
*/
@@ -2111,17 +2137,6 @@ static void bgp_pbr_policyroute_remove_from_zebra(
bgp, path, bpf, bpof, FLOWSPEC_ICMP_TYPE);
else
bgp_pbr_policyroute_remove_from_zebra_unit(bgp, path, bpf);
- /* flush bpof */
- if (bpof->tcpflags)
- list_delete_all_node(bpof->tcpflags);
- if (bpof->dscp)
- list_delete_all_node(bpof->dscp);
- if (bpof->flowlabel)
- list_delete_all_node(bpof->flowlabel);
- if (bpof->pkt_len)
- list_delete_all_node(bpof->pkt_len);
- if (bpof->fragment)
- list_delete_all_node(bpof->fragment);
}
static void bgp_pbr_dump_entry(struct bgp_pbr_filter *bpf, bool add)
@@ -2606,19 +2621,6 @@ static void bgp_pbr_policyroute_add_to_zebra(struct bgp *bgp,
bgp, path, bpf, bpof, nh, rate, FLOWSPEC_ICMP_TYPE);
else
bgp_pbr_policyroute_add_to_zebra_unit(bgp, path, bpf, nh, rate);
- /* flush bpof */
- if (bpof->tcpflags)
- list_delete_all_node(bpof->tcpflags);
- if (bpof->dscp)
- list_delete_all_node(bpof->dscp);
- if (bpof->pkt_len)
- list_delete_all_node(bpof->pkt_len);
- if (bpof->fragment)
- list_delete_all_node(bpof->fragment);
- if (bpof->icmp_type)
- list_delete_all_node(bpof->icmp_type);
- if (bpof->icmp_code)
- list_delete_all_node(bpof->icmp_code);
}
static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
@@ -2684,6 +2686,7 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
srcp = &range;
else {
bpof.icmp_type = list_new();
+ bpof.icmp_type->del = bgp_pbr_val_mask_free;
bgp_pbr_extract_enumerate(api->icmp_type,
api->match_icmp_type_num,
OPERATOR_UNARY_OR,
@@ -2699,6 +2702,7 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
dstp = &range_icmp_code;
else {
bpof.icmp_code = list_new();
+ bpof.icmp_code->del = bgp_pbr_val_mask_free;
bgp_pbr_extract_enumerate(api->icmp_code,
api->match_icmp_code_num,
OPERATOR_UNARY_OR,
@@ -2719,6 +2723,7 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
FLOWSPEC_TCP_FLAGS);
} else if (kind_enum == OPERATOR_UNARY_OR) {
bpof.tcpflags = list_new();
+ bpof.tcpflags->del = bgp_pbr_val_mask_free;
bgp_pbr_extract_enumerate(api->tcpflags,
api->match_tcpflags_num,
OPERATOR_UNARY_OR,
@@ -2736,6 +2741,7 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
bpf.pkt_len = &pkt_len;
else {
bpof.pkt_len = list_new();
+ bpof.pkt_len->del = bgp_pbr_val_mask_free;
bgp_pbr_extract_enumerate(api->packet_length,
api->match_packet_length_num,
OPERATOR_UNARY_OR,
@@ -2745,12 +2751,14 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
}
if (api->match_dscp_num >= 1) {
bpof.dscp = list_new();
+ bpof.dscp->del = bgp_pbr_val_mask_free;
bgp_pbr_extract_enumerate(api->dscp, api->match_dscp_num,
OPERATOR_UNARY_OR,
bpof.dscp, FLOWSPEC_DSCP);
}
if (api->match_fragment_num) {
bpof.fragment = list_new();
+ bpof.fragment->del = bgp_pbr_val_mask_free;
bgp_pbr_extract_enumerate(api->fragment,
api->match_fragment_num,
OPERATOR_UNARY_OR,
@@ -2766,7 +2774,7 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
bpf.family = afi2family(api->afi);
if (!add) {
bgp_pbr_policyroute_remove_from_zebra(bgp, path, &bpf, &bpof);
- return;
+ goto flush_bpof;
}
/* no action for add = true */
for (i = 0; i < api->action_num; i++) {
@@ -2844,6 +2852,22 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
if (continue_loop == 0)
break;
}
+
+flush_bpof:
+ if (bpof.tcpflags)
+ list_delete(&bpof.tcpflags);
+ if (bpof.dscp)
+ list_delete(&bpof.dscp);
+ if (bpof.flowlabel)
+ list_delete(&bpof.flowlabel);
+ if (bpof.pkt_len)
+ list_delete(&bpof.pkt_len);
+ if (bpof.fragment)
+ list_delete(&bpof.fragment);
+ if (bpof.icmp_type)
+ list_delete(&bpof.icmp_type);
+ if (bpof.icmp_code)
+ list_delete(&bpof.icmp_code);
}
void bgp_pbr_update_entry(struct bgp *bgp, const struct prefix *p,
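
For illustration only (not part of the commit above): the bgp_pbr.c hunks switch the bpof lists to FRR's list-ownership pattern, where setting the list's del hook lets a single list_delete() free both the list nodes and the bgp_pbr_val_mask elements, replacing the per-field list_delete_all_node() calls. A minimal sketch of that pattern, assuming lib/linklist.h and the MTYPE_PBR_VALMASK memory type used above:

    struct list *vals = list_new();
    struct bgp_pbr_val_mask *vm = XCALLOC(MTYPE_PBR_VALMASK, sizeof(*vm));

    vals->del = bgp_pbr_val_mask_free; /* free each element when the list is deleted */
    listnode_add(vals, vm);
    /* ... fill in and use the list ... */
    list_delete(&vals);                /* frees nodes, calls ->del on each element, sets vals to NULL */
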
diff --git a/bgpd/bgp_rpki.c b/bgpd/bgp_rpki.c
index 04a709b350..aefb58094b 100644
--- a/bgpd/bgp_rpki.c
+++ b/bgpd/bgp_rpki.c
@@ -529,7 +529,10 @@ static struct rtr_mgr_group *get_groups(struct list *cache_list)
inline bool is_synchronized(struct rpki_vrf *rpki_vrf)
{
- return rpki_vrf->rtr_is_synced;
+ if (is_running(rpki_vrf))
+ return rpki_vrf->rtr_is_synced;
+ else
+ return false;
}
inline bool is_running(struct rpki_vrf *rpki_vrf)
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 31cd573aee..83f8057736 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -8972,7 +8972,7 @@ void bgp_terminate(void)
EVENT_OFF(bm->t_bgp_zebra_l3_vni);
bgp_mac_finish();
-#if ENABLE_BGP_VNC
+#ifdef ENABLE_BGP_VNC
rfapi_terminate();
#endif
}
diff --git a/doc/user/static.rst b/doc/user/static.rst
index c1d11cf0b0..8a32460547 100644
--- a/doc/user/static.rst
+++ b/doc/user/static.rst
@@ -46,8 +46,8 @@ a static prefix and gateway, with several possible forms.
NETWORK is destination prefix with a valid v4 or v6 network based upon
initial form of the command.
- GATEWAY is the IP address to use as next-hop for the prefix. Currently, it must match
- the v4 or v6 route type specified at the start of the command.
+   GATEWAY is the IP address to use as next-hop for the prefix. v4 routes can use both v4 and v6 next-hops,
+   while v6 routes only support v6 next-hops.
IFNAME is the name of the interface to use as next-hop. If only IFNAME is specified
(without GATEWAY), a connected route will be created.
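
For illustration only (not part of the commit above): example staticd configuration matching the updated text, assuming the new grammar is in place (addresses are documentation prefixes):

    ! v4 prefix with a v4 next-hop
    ip route 192.0.2.0/24 198.51.100.1
    ! v4 prefix with a v6 next-hop (now permitted for v4 routes)
    ip route 192.0.2.0/24 2001:db8::1
    ! v6 prefix, which only supports a v6 next-hop
    ipv6 route 2001:db8:1::/48 2001:db8::2
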
diff --git a/isisd/isis_cli.c b/isisd/isis_cli.c
index c86d929903..46611a75ec 100644
--- a/isisd/isis_cli.c
+++ b/isisd/isis_cli.c
@@ -2157,7 +2157,7 @@ DEFPY_YANG_NOSH (isis_srv6_node_msd,
/*
* XPath: /frr-isisd:isis/instance/segment-routing-srv6/msd/node-msd/max-segs-left
*/
-DEFPY (isis_srv6_node_msd_max_segs_left,
+DEFPY_YANG (isis_srv6_node_msd_max_segs_left,
isis_srv6_node_msd_max_segs_left_cmd,
"[no] max-segs-left (0-255)$max_segs_left",
NO_STR
@@ -2177,7 +2177,7 @@ DEFPY (isis_srv6_node_msd_max_segs_left,
/*
* XPath: /frr-isisd:isis/instance/segment-routing-srv6/msd/node-msd/max-end-pop
*/
-DEFPY (isis_srv6_node_msd_max_end_pop,
+DEFPY_YANG (isis_srv6_node_msd_max_end_pop,
isis_srv6_node_msd_max_end_pop_cmd,
"[no] max-end-pop (0-255)$max_end_pop",
NO_STR
@@ -2196,7 +2196,7 @@ DEFPY (isis_srv6_node_msd_max_end_pop,
/*
* XPath: /frr-isisd:isis/instance/segment-routing-srv6/msd/node-msd/max-h-encaps
*/
-DEFPY (isis_srv6_node_msd_max_h_encaps,
+DEFPY_YANG (isis_srv6_node_msd_max_h_encaps,
isis_srv6_node_msd_max_h_encaps_cmd,
"[no] max-h-encaps (0-255)$max_h_encaps",
NO_STR
@@ -2216,7 +2216,7 @@ DEFPY (isis_srv6_node_msd_max_h_encaps,
/*
* XPath: /frr-isisd:isis/instance/segment-routing-srv6/msd/node-msd/max-end-d
*/
-DEFPY (isis_srv6_node_msd_max_end_d,
+DEFPY_YANG (isis_srv6_node_msd_max_end_d,
isis_srv6_node_msd_max_end_d_cmd,
"[no] max-end-d (0-255)$max_end_d",
NO_STR
@@ -2262,7 +2262,7 @@ void cli_show_isis_srv6_node_msd_end(struct vty *vty, const struct lyd_node *dno
/*
* XPath: /frr-isisd:isis/instance/segment-routing-srv6/interface
*/
-DEFPY (isis_srv6_interface,
+DEFPY_YANG (isis_srv6_interface,
isis_srv6_interface_cmd,
"[no] interface WORD$interface",
NO_STR
@@ -3268,7 +3268,7 @@ void cli_show_ip_isis_frr(struct vty *vty, const struct lyd_node *dnode,
/*
* XPath: /frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-{1,2}/lfa/enable
*/
-DEFPY(isis_lfa, isis_lfa_cmd,
+DEFPY_YANG(isis_lfa, isis_lfa_cmd,
"[no] isis fast-reroute lfa [level-1|level-2]$level",
NO_STR
"IS-IS routing protocol\n"
@@ -3311,7 +3311,7 @@ DEFPY(isis_lfa, isis_lfa_cmd,
* XPath:
* /frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-{1,2}/lfa/exclude-interface
*/
-DEFPY(isis_lfa_exclude_interface, isis_lfa_exclude_interface_cmd,
+DEFPY_YANG(isis_lfa_exclude_interface, isis_lfa_exclude_interface_cmd,
"[no] isis fast-reroute lfa [level-1|level-2]$level exclude interface IFNAME$ifname",
NO_STR
"IS-IS routing protocol\n"
@@ -3362,7 +3362,7 @@ void cli_show_frr_lfa_exclude_interface(struct vty *vty,
* XPath:
* /frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-{1,2}/remote-lfa/enable
*/
-DEFPY(isis_remote_lfa, isis_remote_lfa_cmd,
+DEFPY_YANG(isis_remote_lfa, isis_remote_lfa_cmd,
"[no] isis fast-reroute remote-lfa tunnel mpls-ldp [level-1|level-2]$level",
NO_STR
"IS-IS routing protocol\n"
@@ -3407,7 +3407,7 @@ DEFPY(isis_remote_lfa, isis_remote_lfa_cmd,
* XPath:
* /frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-{1,2}/remote-lfa/maximum-metric
*/
-DEFPY(isis_remote_lfa_max_metric, isis_remote_lfa_max_metric_cmd,
+DEFPY_YANG(isis_remote_lfa_max_metric, isis_remote_lfa_max_metric_cmd,
"[no] isis fast-reroute remote-lfa maximum-metric (1-16777215)$metric [level-1|level-2]$level",
NO_STR
"IS-IS routing protocol\n"
@@ -3460,7 +3460,7 @@ void cli_show_frr_remote_lfa_max_metric(struct vty *vty,
/*
* XPath: /frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-{1,2}/ti-lfa/enable
*/
-DEFPY(isis_ti_lfa, isis_ti_lfa_cmd,
+DEFPY_YANG(isis_ti_lfa, isis_ti_lfa_cmd,
"[no] isis fast-reroute ti-lfa [level-1|level-2]$level [node-protection$node_protection [link-fallback$link_fallback]]",
NO_STR
"IS-IS routing protocol\n"
@@ -3578,7 +3578,7 @@ void cli_show_isis_log_pdu_drops(struct vty *vty, const struct lyd_node *dnode,
/*
* XPath: /frr-isisd:isis/instance/mpls/ldp-sync
*/
-DEFPY(isis_mpls_ldp_sync, isis_mpls_ldp_sync_cmd, "mpls ldp-sync",
+DEFPY_YANG(isis_mpls_ldp_sync, isis_mpls_ldp_sync_cmd, "mpls ldp-sync",
MPLS_STR MPLS_LDP_SYNC_STR)
{
nb_cli_enqueue_change(vty, "./mpls/ldp-sync", NB_OP_CREATE, NULL);
@@ -3586,7 +3586,7 @@ DEFPY(isis_mpls_ldp_sync, isis_mpls_ldp_sync_cmd, "mpls ldp-sync",
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY(no_isis_mpls_ldp_sync, no_isis_mpls_ldp_sync_cmd, "no mpls ldp-sync",
+DEFPY_YANG(no_isis_mpls_ldp_sync, no_isis_mpls_ldp_sync_cmd, "no mpls ldp-sync",
NO_STR MPLS_STR NO_MPLS_LDP_SYNC_STR)
{
nb_cli_enqueue_change(vty, "./mpls/ldp-sync", NB_OP_DESTROY, NULL);
@@ -3600,7 +3600,7 @@ void cli_show_isis_mpls_ldp_sync(struct vty *vty, const struct lyd_node *dnode,
vty_out(vty, " mpls ldp-sync\n");
}
-DEFPY(isis_mpls_ldp_sync_holddown, isis_mpls_ldp_sync_holddown_cmd,
+DEFPY_YANG(isis_mpls_ldp_sync_holddown, isis_mpls_ldp_sync_holddown_cmd,
"mpls ldp-sync holddown (0-10000)",
MPLS_STR MPLS_LDP_SYNC_STR
"Time to wait for LDP-SYNC to occur before restoring interface metric\n"
@@ -3612,7 +3612,7 @@ DEFPY(isis_mpls_ldp_sync_holddown, isis_mpls_ldp_sync_holddown_cmd,
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY(no_isis_mpls_ldp_sync_holddown, no_isis_mpls_ldp_sync_holddown_cmd,
+DEFPY_YANG(no_isis_mpls_ldp_sync_holddown, no_isis_mpls_ldp_sync_holddown_cmd,
"no mpls ldp-sync holddown [<(1-10000)>]",
NO_STR MPLS_STR MPLS_LDP_SYNC_STR NO_MPLS_LDP_SYNC_HOLDDOWN_STR "Time in seconds\n")
{
@@ -3633,7 +3633,7 @@ void cli_show_isis_mpls_ldp_sync_holddown(struct vty *vty,
/*
* XPath: /frr-interface:lib/interface/frr-isisd:isis/mpls/ldp-sync
*/
-DEFPY(isis_mpls_if_ldp_sync, isis_mpls_if_ldp_sync_cmd,
+DEFPY_YANG(isis_mpls_if_ldp_sync, isis_mpls_if_ldp_sync_cmd,
"[no] isis mpls ldp-sync",
NO_STR "IS-IS routing protocol\n" MPLS_STR MPLS_LDP_SYNC_STR)
{
@@ -3663,7 +3663,7 @@ void cli_show_isis_mpls_if_ldp_sync(struct vty *vty,
vty_out(vty, " isis mpls ldp-sync\n");
}
-DEFPY(isis_mpls_if_ldp_sync_holddown, isis_mpls_if_ldp_sync_holddown_cmd,
+DEFPY_YANG(isis_mpls_if_ldp_sync_holddown, isis_mpls_if_ldp_sync_holddown_cmd,
"isis mpls ldp-sync holddown (0-10000)",
"IS-IS routing protocol\n" MPLS_STR MPLS_LDP_SYNC_STR
"Time to wait for LDP-SYNC to occur before restoring interface metric\n"
@@ -3684,7 +3684,7 @@ DEFPY(isis_mpls_if_ldp_sync_holddown, isis_mpls_if_ldp_sync_holddown_cmd,
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY(no_isis_mpls_if_ldp_sync_holddown, no_isis_mpls_if_ldp_sync_holddown_cmd,
+DEFPY_YANG(no_isis_mpls_if_ldp_sync_holddown, no_isis_mpls_if_ldp_sync_holddown_cmd,
"no isis mpls ldp-sync holddown [<(1-10000)>]",
NO_STR "IS-IS routing protocol\n" MPLS_STR NO_MPLS_LDP_SYNC_STR
NO_MPLS_LDP_SYNC_HOLDDOWN_STR "Time in seconds\n")
diff --git a/isisd/isis_zebra.c b/isisd/isis_zebra.c
index 9d483c9368..15af9636ca 100644
--- a/isisd/isis_zebra.c
+++ b/isisd/isis_zebra.c
@@ -1549,6 +1549,7 @@ static int isis_zebra_srv6_sid_notify(ZAPI_CALLBACK_ARGS)
isis_zebra_srv6_sid_uninstall(area, sid);
listnode_delete(area->srv6db.srv6_sids,
sid);
+ isis_srv6_sid_free(sid);
}
/* Allocate new SRv6 End SID */
diff --git a/lib/command_parse.y b/lib/command_parse.y
index 8867e98ccc..6758aed142 100644
--- a/lib/command_parse.y
+++ b/lib/command_parse.y
@@ -158,6 +158,14 @@
ctx->docstr_start = ctx->docstr;
}
+%{
+#ifdef __clang__
+# if __clang_major__ > 12
+# pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+# endif
+#endif
+%}
+
%%
start:
diff --git a/lib/northbound.c b/lib/northbound.c
index f860b83c45..a1e26d2523 100644
--- a/lib/northbound.c
+++ b/lib/northbound.c
@@ -235,8 +235,9 @@ static int nb_node_validate_cb(const struct nb_node *nb_node,
* depends on context (e.g. some daemons might augment "frr-interface"
* while others don't).
*/
- if (!valid && callback_implemented && operation != NB_CB_GET_NEXT
- && operation != NB_CB_GET_KEYS && operation != NB_CB_LOOKUP_ENTRY)
+ if (!valid && callback_implemented && operation != NB_CB_GET_NEXT &&
+ operation != NB_CB_GET_KEYS && operation != NB_CB_LIST_ENTRY_DONE &&
+ operation != NB_CB_LOOKUP_ENTRY)
flog_warn(EC_LIB_NB_CB_UNNEEDED,
"unneeded '%s' callback for '%s'",
nb_cb_operation_name(operation), nb_node->xpath);
@@ -283,6 +284,8 @@ static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)
state_optional);
error += nb_node_validate_cb(nb_node, NB_CB_GET_KEYS, !!nb_node->cbs.get_keys,
state_optional);
+ error += nb_node_validate_cb(nb_node, NB_CB_LIST_ENTRY_DONE, !!nb_node->cbs.list_entry_done,
+ true);
error += nb_node_validate_cb(nb_node, NB_CB_LOOKUP_ENTRY, !!nb_node->cbs.lookup_entry,
state_optional);
error += nb_node_validate_cb(nb_node, NB_CB_RPC, !!nb_node->cbs.rpc,
@@ -1806,6 +1809,19 @@ int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
return nb_node->cbs.get_keys(&args);
}
+void nb_callback_list_entry_done(const struct nb_node *nb_node, const void *parent_list_entry,
+ const void *list_entry)
+{
+ if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CFG_CBS) || !nb_node->cbs.list_entry_done)
+ return;
+
+ DEBUGD(&nb_dbg_cbs_state,
+ "northbound callback (list_entry_done): node [%s] parent_list_entry [%p] list_entry [%p]",
+ nb_node->xpath, parent_list_entry, list_entry);
+
+ nb_node->cbs.list_entry_done(parent_list_entry, list_entry);
+}
+
const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
const void *parent_list_entry,
const struct yang_list_keys *keys)
@@ -1943,6 +1959,7 @@ static int nb_callback_configuration(struct nb_context *context,
case NB_CB_GET_ELEM:
case NB_CB_GET_NEXT:
case NB_CB_GET_KEYS:
+ case NB_CB_LIST_ENTRY_DONE:
case NB_CB_LOOKUP_ENTRY:
case NB_CB_RPC:
case NB_CB_NOTIFY:
@@ -2322,6 +2339,7 @@ bool nb_cb_operation_is_valid(enum nb_cb_operation operation,
}
return true;
case NB_CB_GET_KEYS:
+ case NB_CB_LIST_ENTRY_DONE:
case NB_CB_LOOKUP_ENTRY:
switch (snode->nodetype) {
case LYS_LIST:
@@ -2625,6 +2643,8 @@ const char *nb_cb_operation_name(enum nb_cb_operation operation)
return "get_next";
case NB_CB_GET_KEYS:
return "get_keys";
+ case NB_CB_LIST_ENTRY_DONE:
+ return "list_entry_done";
case NB_CB_LOOKUP_ENTRY:
return "lookup_entry";
case NB_CB_RPC:
diff --git a/lib/northbound.h b/lib/northbound.h
index 0468c58de3..53abf90a9f 100644
--- a/lib/northbound.h
+++ b/lib/northbound.h
@@ -98,6 +98,7 @@ enum nb_cb_operation {
NB_CB_GET_ELEM,
NB_CB_GET_NEXT,
NB_CB_GET_KEYS,
+ NB_CB_LIST_ENTRY_DONE,
NB_CB_LOOKUP_ENTRY,
NB_CB_RPC,
NB_CB_NOTIFY,
@@ -518,6 +519,24 @@ struct nb_callbacks {
/*
* Operational data callback for YANG lists.
*
+	 * This callback function is called to clean up any resources that may be
+ * held by a backend opaque `list_entry` value (e.g., a lock). It is
+ * called when the northbound code is done using a `list_entry` value it
+ * obtained using the lookup_entry() callback. It is also called on the
+ * `list_entry` returned from the get_next() or lookup_next() callbacks
+ * if the iteration aborts before walking to the end of the list. The
+ * intention is to allow any resources (e.g., a lock) to now be
+ * released.
+ *
+ * args
+ * parent_list_entry - pointer to the parent list entry
+ * list_entry - value returned previously from `lookup_entry()`
+ */
+ void (*list_entry_done)(const void *parent_list_entry, const void *list_entry);
+
+ /*
+ * Operational data callback for YANG lists.
+ *
* The callback function should return a list entry based on the list
* keys given as a parameter. Keyless lists don't need to implement this
* callback.
@@ -883,6 +902,8 @@ extern int nb_callback_get_keys(const struct nb_node *nb_node,
extern const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
const void *parent_list_entry,
const struct yang_list_keys *keys);
+extern void nb_callback_list_entry_done(const struct nb_node *nb_node,
+ const void *parent_list_entry, const void *list_entry);
extern const void *nb_callback_lookup_node_entry(struct lyd_node *node,
const void *parent_list_entry);
extern const void *nb_callback_lookup_next(const struct nb_node *nb_node,
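
For illustration only (not part of the commit above): a minimal daemon-side sketch of the new list_entry_done() callback documented in this header change. Only the callback prototype comes from the diff; the rwlock, the example_* names, and the example_peer_find() helper are hypothetical.

    #include <pthread.h>

    static pthread_rwlock_t example_peer_db_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Hypothetical lookup path: hand out a list entry while holding a read
     * lock on the daemon's peer database.
     */
    static const void *example_peer_get_locked(const char *name)
    {
            pthread_rwlock_rdlock(&example_peer_db_lock);
            return example_peer_find(name); /* assumed daemon-local helper */
    }

    /* Matches the list_entry_done prototype above: northbound calls this when
     * it is finished with list_entry, so the read lock taken in the lookup
     * path can be released here.
     */
    static void example_peer_list_entry_done(const void *parent_list_entry,
                                             const void *list_entry)
    {
            pthread_rwlock_unlock(&example_peer_db_lock);
    }
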
diff --git a/lib/northbound_oper.c b/lib/northbound_oper.c
index d9ad9b1701..0ce9d77259 100644
--- a/lib/northbound_oper.c
+++ b/lib/northbound_oper.c
@@ -139,6 +139,9 @@ static const void *nb_op_list_get_next(struct nb_op_yield_state *ys, struct nb_n
static const void *nb_op_list_lookup_entry(struct nb_op_yield_state *ys, struct nb_node *nb_node,
const struct nb_op_node_info *pni, struct lyd_node *node,
const struct yang_list_keys *keys);
+static void nb_op_list_list_entry_done(struct nb_op_yield_state *ys, struct nb_node *nb_node,
+ const struct nb_op_node_info *pni, const void *list_entry);
+static void ys_pop_inner(struct nb_op_yield_state *ys);
/* -------------------- */
/* Function Definitions */
@@ -157,8 +160,8 @@ nb_op_create_yield_state(const char *xpath, struct yang_translator *translator,
/* remove trailing '/'s */
while (darr_len(ys->xpath) > 1 && ys->xpath[darr_len(ys->xpath) - 2] == '/') {
darr_setlen(ys->xpath, darr_len(ys->xpath) - 1);
- if (darr_last(ys->xpath))
- *darr_last(ys->xpath) = 0;
+ assert(darr_last(ys->xpath)); /* quiet clang-analyzer :( */
+ *darr_last(ys->xpath) = 0;
}
ys->xpath_orig = darr_strdup(xpath);
ys->translator = translator;
@@ -189,6 +192,9 @@ static inline void nb_op_free_yield_state(struct nb_op_yield_state *ys,
darr_free(ys->non_specific_predicate);
darr_free(ys->query_tokstr);
darr_free(ys->schema_path);
+	/* need to clean up resources, so pop these individually */
+ while (darr_len(ys->node_infos))
+ ys_pop_inner(ys);
darr_free(ys->node_infos);
darr_free(ys->xpath_orig);
darr_free(ys->xpath);
@@ -223,10 +229,20 @@ static void ys_trim_xpath(struct nb_op_yield_state *ys)
static void ys_pop_inner(struct nb_op_yield_state *ys)
{
- uint len = darr_len(ys->node_infos);
+ struct nb_op_node_info *ni, *pni;
+ struct nb_node *nb_node;
+ int i = darr_lasti(ys->node_infos);
- assert(len);
- darr_setlen(ys->node_infos, len - 1);
+ pni = i > 0 ? &ys->node_infos[i - 1] : NULL;
+ ni = &ys->node_infos[i];
+
+	/* list_entry values propagate, so only free the first occurrence */
+ if (ni->list_entry && (!pni || pni->list_entry != ni->list_entry)) {
+ nb_node = ni->schema ? ni->schema->priv : NULL;
+ if (nb_node)
+ nb_op_list_list_entry_done(ys, nb_node, pni, ni->list_entry);
+ }
+ darr_setlen(ys->node_infos, i);
ys_trim_xpath(ys);
}
@@ -873,6 +889,14 @@ static enum nb_error nb_op_list_get_keys(struct nb_op_yield_state *ys, struct nb
return 0;
}
+static void nb_op_list_list_entry_done(struct nb_op_yield_state *ys, struct nb_node *nb_node,
+ const struct nb_op_node_info *pni, const void *list_entry)
+{
+ if (CHECK_FLAG(nb_node->flags, F_NB_NODE_HAS_GET_TREE))
+ return;
+
+ nb_callback_list_entry_done(nb_node, pni ? pni->list_entry : NULL, list_entry);
+}
/**
* nb_op_add_leaf() - Add leaf data to the get tree results
@@ -1154,8 +1178,8 @@ static const struct lysc_node *nb_op_sib_first(struct nb_op_yield_state *ys,
*
* If the schema path (original query) is longer than our current node
* info stack (current xpath location), we are building back up to the
- * base of the user query, return the next schema node from the query
- * string (schema_path).
+ * base of the walk at the end of the user query path, return the next
+ * schema node from the query string (schema_path).
*/
if (last != NULL)
assert(last->schema == parent);
@@ -1526,6 +1550,18 @@ static enum nb_error __walk(struct nb_op_yield_state *ys, bool is_resume)
*/
assert(!list_start);
is_specific_node = true;
+
+ /*
+ * Release the entry back to the daemon
+ */
+ assert(ni->list_entry == list_entry);
+ nb_op_list_list_entry_done(ys, nn, pni, list_entry);
+ ni->list_entry = NULL;
+
+ /*
+ * Continue on as we may reap the resulting node
+ * if empty.
+ */
list_entry = NULL;
}
@@ -1606,6 +1642,18 @@ static enum nb_error __walk(struct nb_op_yield_state *ys, bool is_resume)
}
/*
+ * The walk API is that get/lookup_next returns NULL
+	 * when done; those callbacks are also responsible
+	 * for releasing any state associated with previous
+	 * list_entry values (e.g., any locks) during the iteration.
+ * Therefore we need to zero out the last top level
+ * list_entry so we don't mistakenly call the
+ * list_entry_done() callback on it.
+ */
+ if (!is_specific_node && !list_start && !list_entry)
+ ni->list_entry = NULL;
+
+ /*
* (FN:A) Reap empty list element? Check to see if we
* should reap an empty list element. We do this if the
* empty list element exists at or below the query base
@@ -1620,17 +1668,15 @@ static enum nb_error __walk(struct nb_op_yield_state *ys, bool is_resume)
* have no non-key children, check for this condition
* and do not reap if true.
*/
- if (!list_start && ni->inner &&
- !lyd_child_no_keys(ni->inner) &&
+ if (!list_start && ni->inner && !lyd_child_no_keys(ni->inner) &&
/* not the top element with a key match */
- !((darr_ilen(ys->node_infos) ==
- darr_ilen(ys->schema_path) - 1) &&
+ !(darr_ilen(ys->schema_path) && /* quiet clang-analyzer :( */
+ (darr_ilen(ys->node_infos) == darr_ilen(ys->schema_path) - 1) &&
lysc_is_key((*darr_last(ys->schema_path)))) &&
- /* is this at or below the base? */
- darr_ilen(ys->node_infos) <= ys->query_base_level)
+ /* is this list entry below the query base? */
+ darr_ilen(ys->node_infos) - 1 < ys->query_base_level)
ys_free_inner(ys, ni);
-
if (!list_entry) {
/*
* List Iteration Done
@@ -1725,12 +1771,15 @@ static enum nb_error __walk(struct nb_op_yield_state *ys, bool is_resume)
ni->xpath_len = len;
}
+ /* Save the new list_entry early so it can be cleaned up on error */
+ ni->list_entry = list_entry;
+ ni->schema = sib;
+
/* Need to get keys. */
if (!CHECK_FLAG(nn->flags, F_NB_NODE_KEYLESS_LIST)) {
ret = nb_op_list_get_keys(ys, nn, list_entry, &ni->keys);
if (ret) {
- darr_pop(ys->node_infos);
ret = NB_ERR_RESOURCE;
goto done;
}
@@ -1765,7 +1814,6 @@ static enum nb_error __walk(struct nb_op_yield_state *ys, bool is_resume)
.inner,
sib, &ni->keys, &node);
if (err) {
- darr_pop(ys->node_infos);
ret = NB_ERR_RESOURCE;
goto done;
}
@@ -1775,8 +1823,7 @@ static enum nb_error __walk(struct nb_op_yield_state *ys, bool is_resume)
* Save the new list entry with the list node info
*/
ni->inner = node;
- ni->schema = node->schema;
- ni->list_entry = list_entry;
+ assert(ni->schema == node->schema);
ni->niters += 1;
ni->nents += 1;
diff --git a/lib/resolver.c b/lib/resolver.c
index 901ccf8132..62b98091ec 100644
--- a/lib/resolver.c
+++ b/lib/resolver.c
@@ -282,7 +282,7 @@ static void resolver_cb_literal(struct event *t)
callback = query->callback;
query->callback = NULL;
- callback(query, ARES_SUCCESS, 1, &query->literal_addr);
+ callback(query, NULL, 1, &query->literal_addr);
}
void resolver_resolve(struct resolver_query *query, int af, vrf_id_t vrf_id,
diff --git a/pathd/path_cli.c b/pathd/path_cli.c
index bf8a9ea028..27236667b1 100644
--- a/pathd/path_cli.c
+++ b/pathd/path_cli.c
@@ -234,7 +234,7 @@ DEFPY_NOSH(
/*
* XPath: /frr-pathd:pathd/srte/segment-list
*/
-DEFPY_NOSH(
+DEFPY_YANG_NOSH(
srte_segment_list,
srte_segment_list_cmd,
"segment-list WORD$name",
@@ -267,7 +267,7 @@ DEFPY_NOSH(
return ret;
}
-DEFPY(srte_no_segment_list,
+DEFPY_YANG(srte_no_segment_list,
srte_no_segment_list_cmd,
"no segment-list WORD$name",
NO_STR
@@ -463,7 +463,7 @@ int segment_list_has_prefix(
* XPath: /frr-pathd:pathd/srte/segment-list/segment
*/
/* clang-format off */
-DEFPY(srte_segment_list_segment, srte_segment_list_segment_cmd,
+DEFPY_YANG(srte_segment_list_segment, srte_segment_list_segment_cmd,
"index (0-4294967295)$index <[mpls$has_mpls_label label (16-1048575)$label] "
"|"
"[nai$has_nai <"
@@ -527,7 +527,7 @@ DEFPY(srte_segment_list_segment, srte_segment_list_segment_cmd,
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY(srte_segment_list_no_segment,
+DEFPY_YANG(srte_segment_list_no_segment,
srte_segment_list_no_segment_cmd,
"no index (0-4294967295)$index",
NO_STR
@@ -607,7 +607,7 @@ void cli_show_srte_segment_list_segment(struct vty *vty,
/*
* XPath: /frr-pathd:pathd/policy
*/
-DEFPY_NOSH(
+DEFPY_YANG_NOSH(
srte_policy,
srte_policy_cmd,
"policy color (0-4294967295)$num endpoint <A.B.C.D|X:X::X:X>$endpoint",
@@ -633,7 +633,7 @@ DEFPY_NOSH(
return ret;
}
-DEFPY(srte_no_policy,
+DEFPY_YANG(srte_no_policy,
srte_no_policy_cmd,
"no policy color (0-4294967295)$num endpoint <A.B.C.D|X:X::X:X>$endpoint",
NO_STR
@@ -670,7 +670,7 @@ void cli_show_srte_policy_end(struct vty *vty, const struct lyd_node *dnode)
/*
* XPath: /frr-pathd:pathd/srte/policy/name
*/
-DEFPY(srte_policy_name,
+DEFPY_YANG(srte_policy_name,
srte_policy_name_cmd,
"name WORD$name",
"Segment Routing Policy name\n"
@@ -681,7 +681,7 @@ DEFPY(srte_policy_name,
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY(srte_policy_no_name,
+DEFPY_YANG(srte_policy_no_name,
srte_policy_no_name_cmd,
"no name [WORD]",
NO_STR
@@ -703,7 +703,7 @@ void cli_show_srte_policy_name(struct vty *vty, const struct lyd_node *dnode,
/*
* XPath: /frr-pathd:pathd/srte/policy/binding-sid
*/
-DEFPY(srte_policy_binding_sid,
+DEFPY_YANG(srte_policy_binding_sid,
srte_policy_binding_sid_cmd,
"binding-sid (16-1048575)$label",
"Segment Routing Policy Binding-SID\n"
@@ -714,7 +714,7 @@ DEFPY(srte_policy_binding_sid,
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY(srte_policy_no_binding_sid,
+DEFPY_YANG(srte_policy_no_binding_sid,
srte_policy_no_binding_sid_cmd,
"no binding-sid [(16-1048575)]",
NO_STR
@@ -736,7 +736,7 @@ void cli_show_srte_policy_binding_sid(struct vty *vty,
/*
* XPath: /frr-pathd:pathd/srte/policy/candidate-path
*/
-DEFPY(srte_policy_candidate_exp,
+DEFPY_YANG(srte_policy_candidate_exp,
srte_policy_candidate_exp_cmd,
"candidate-path preference (0-4294967295)$preference name WORD$name \
explicit segment-list WORD$list_name",
@@ -760,7 +760,7 @@ DEFPY(srte_policy_candidate_exp,
preference_str);
}
-DEFPY_NOSH(
+DEFPY_YANG_NOSH(
srte_policy_candidate_dyn,
srte_policy_candidate_dyn_cmd,
"candidate-path preference (0-4294967295)$preference name WORD$name dynamic",
@@ -791,7 +791,7 @@ DEFPY_NOSH(
return ret;
}
-DEFPY(srte_candidate_bandwidth,
+DEFPY_YANG(srte_candidate_bandwidth,
srte_candidate_bandwidth_cmd,
"bandwidth BANDWIDTH$value [required$required]",
"Define a bandwidth constraint\n"
@@ -805,7 +805,7 @@ DEFPY(srte_candidate_bandwidth,
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY(srte_candidate_no_bandwidth,
+DEFPY_YANG(srte_candidate_no_bandwidth,
srte_candidate_no_bandwidth_cmd,
"no bandwidth [BANDWIDTH$value] [required$required]",
NO_STR
@@ -818,7 +818,7 @@ DEFPY(srte_candidate_no_bandwidth,
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY(srte_candidate_affinity_filter, srte_candidate_affinity_filter_cmd,
+DEFPY_YANG(srte_candidate_affinity_filter, srte_candidate_affinity_filter_cmd,
"affinity <exclude-any|include-any|include-all>$type BITPATTERN$value",
"Affinity constraint\n"
"Exclude any matching link\n"
@@ -842,7 +842,7 @@ DEFPY(srte_candidate_affinity_filter, srte_candidate_affinity_filter_cmd,
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY(srte_candidate_no_affinity_filter, srte_candidate_no_affinity_filter_cmd,
+DEFPY_YANG(srte_candidate_no_affinity_filter, srte_candidate_no_affinity_filter_cmd,
"no affinity <exclude-any|include-any|include-all>$type [BITPATTERN$value]",
NO_STR
"Affinity constraint\n"
@@ -858,7 +858,7 @@ DEFPY(srte_candidate_no_affinity_filter, srte_candidate_no_affinity_filter_cmd,
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY(srte_candidate_metric,
+DEFPY_YANG(srte_candidate_metric,
srte_candidate_metric_cmd,
"metric [bound$bound] <igp|te|hc|abc|lmll|cigp|cte|pigp|pte|phc|msd|pd|pdv|pl|ppd|ppdv|ppl|nap|nlp|dc|bnc>$type METRIC$value [required$required] [computed$computed]",
"Define a metric constraint\n"
@@ -907,7 +907,7 @@ DEFPY(srte_candidate_metric,
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY(srte_candidate_no_metric,
+DEFPY_YANG(srte_candidate_no_metric,
srte_candidate_no_metric_cmd,
"no metric [bound] <igp|te|hc|abc|lmll|cigp|cte|pigp|pte|phc|msd|pd|pdv|pl|ppd|ppdv|ppl|nap|nlp|dc|bnc>$type [METRIC$value] [required$required] [computed$computed]",
NO_STR
@@ -945,7 +945,7 @@ DEFPY(srte_candidate_no_metric,
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY(srte_policy_no_candidate,
+DEFPY_YANG(srte_policy_no_candidate,
srte_policy_no_candidate_cmd,
"no candidate-path\
preference (0-4294967295)$preference\
@@ -971,7 +971,7 @@ DEFPY(srte_policy_no_candidate,
preference_str);
}
-DEFPY(srte_candidate_objfun,
+DEFPY_YANG(srte_candidate_objfun,
srte_candidate_objfun_cmd,
"objective-function <mcp|mlp|mbp|mbc|mll|mcc|spt|mct|mplp|mup|mrup|mtd|mbn|mctd|msl|mss|msn>$type [required$required]",
"Define an objective function constraint\n"
@@ -1006,7 +1006,7 @@ DEFPY(srte_candidate_objfun,
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY(srte_candidate_no_objfun,
+DEFPY_YANG(srte_candidate_no_objfun,
srte_candidate_no_objfun_cmd,
"no objective-function [<mcp|mlp|mbp|mbc|mll|mcc|spt|mct|mplp|mup|mrup|mtd|mbn|mctd|msl|mss|msn>] [required$required]",
NO_STR
diff --git a/pimd/pim6_cmd.c b/pimd/pim6_cmd.c
index 8297911828..ad11bb1822 100644
--- a/pimd/pim6_cmd.c
+++ b/pimd/pim6_cmd.c
@@ -999,6 +999,20 @@ DEFPY (interface_no_ipv6_mroute,
source_str);
}
+DEFPY_YANG(interface_ipv6_pim_use_source,
+ interface_ipv6_pim_use_source_cmd,
+ "[no] ipv6 pim use-source X:X::X:X$source",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "Configure primary IPv6 address\n"
+ "Source IPv6 address\n")
+{
+ nb_cli_enqueue_change(vty, "./use-source", NB_OP_MODIFY, no ? "::" : source_str);
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH, "frr-routing:ipv6");
+}
+
DEFPY (pim6_rp,
pim6_rp_cmd,
"rp X:X::X:X$rp [X:X::X:X/M]$gp",
@@ -2972,6 +2986,8 @@ void pim_cmd_init(void)
install_element(INTERFACE_NODE, &interface_ipv6_mld_limits_cmd);
install_element(INTERFACE_NODE, &no_interface_ipv6_mld_limits_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_pim_use_source_cmd);
+
/* Install BSM command */
install_element(INTERFACE_NODE, &ipv6_pim_bsm_cmd);
install_element(INTERFACE_NODE, &no_ipv6_pim_bsm_cmd);
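
For illustration only (not part of the commit above): the new interface_ipv6_pim_use_source command registered here would be entered under an interface; the interface name and address below are hypothetical.

    interface eth0
     ipv6 pim use-source 2001:db8::1
    exit
    ! revert to default source selection (the address is required by the grammar)
    interface eth0
     no ipv6 pim use-source 2001:db8::1
    exit
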
diff --git a/pimd/pim_autorp.c b/pimd/pim_autorp.c
index d3f3517efd..baa56a2d69 100644
--- a/pimd/pim_autorp.c
+++ b/pimd/pim_autorp.c
@@ -113,10 +113,33 @@ static void pim_autorp_free(struct pim_autorp *autorp)
XFREE(MTYPE_PIM_AUTORP_ANNOUNCE, autorp->announce_pkt);
}
+static bool autorp_is_pim_interface(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+
+ return CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) && pim_ifp && pim_ifp->pim_enable &&
+ !pim_ifp->pim_passive_enable;
+}
+
+static bool pim_autorp_should_enable_socket(struct pim_autorp *autorp)
+{
+ struct interface *ifp;
+
+ /* Only enable the socket if there are any PIM enabled interfaces */
+ FOR_ALL_INTERFACES (autorp->pim->vrf, ifp) {
+ if (autorp_is_pim_interface(ifp))
+ return true;
+ }
+ return false;
+}
+
static bool pim_autorp_should_close(struct pim_autorp *autorp)
{
- /* If discovery or mapping agent is active, then we need the socket open */
- return !autorp->do_discovery && !autorp->send_rp_discovery;
+ /* If discovery or mapping agent is active, then we need the socket open. We also want to leave
+ * the socket open if there are any pim interfaces and we have an announcement packet to send.
+ */
+ return !autorp->do_discovery && !autorp->send_rp_discovery &&
+ !(pim_autorp_should_enable_socket(autorp) && autorp->announce_timer != NULL);
}
static bool pim_autorp_join_groups(struct interface *ifp)
@@ -283,8 +306,8 @@ static bool autorp_recv_announcement(struct pim_autorp *autorp, uint8_t rpcnt, u
/* Ignore RP's limited to PIM version 1 or with an unknown version */
if (rp->pimver == AUTORP_PIM_V1 || rp->pimver == AUTORP_PIM_VUNKNOWN) {
if (PIM_DEBUG_AUTORP)
- zlog_debug("%s: Ignoring unsupported PIM version (%u) in AutoRP Announcement for RP %pI4",
- __func__, rp->pimver, (in_addr_t *)&(rp->addr));
+ zlog_debug("%s: Ignoring unsupported PIM version (%u) in AutoRP Announcement for RP %pPA",
+ __func__, rp->pimver, &rp_addr);
/* Update the offset to skip past the groups advertised for this RP */
offset += (AUTORP_GRPLEN * rp->grpcnt);
continue;
@@ -293,14 +316,14 @@ static bool autorp_recv_announcement(struct pim_autorp *autorp, uint8_t rpcnt, u
if (rp->grpcnt == 0) {
/* No groups?? */
if (PIM_DEBUG_AUTORP)
- zlog_debug("%s: Announcement message has no groups for RP %pI4",
- __func__, (in_addr_t *)&(rp->addr));
+ zlog_debug("%s: Announcement message has no groups for RP %pPA",
+ __func__, &rp_addr);
continue;
}
if ((buf_size - offset) < AUTORP_GRPLEN) {
- zlog_warn("%s: Buffer underrun parsing groups for RP %pI4", __func__,
- (in_addr_t *)&(rp->addr));
+ zlog_warn("%s: Buffer underrun parsing groups for RP %pPA", __func__,
+ &rp_addr);
return false;
}
@@ -684,8 +707,14 @@ static void autorp_send_discovery_on(struct pim_autorp *autorp)
int interval = 5;
/* Make sure the socket is open and ready */
- if (!pim_autorp_socket_enable(autorp)) {
- zlog_err("%s: AutoRP failed to open socket", __func__);
+ if (pim_autorp_should_enable_socket(autorp)) {
+ if (!pim_autorp_socket_enable(autorp)) {
+ zlog_err("%s: AutoRP failed to open socket", __func__);
+ return;
+ }
+ } else {
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: No PIM interfaces, not enabling socket", __func__);
return;
}
@@ -765,14 +794,14 @@ static bool autorp_recv_discovery(struct pim_autorp *autorp, uint8_t rpcnt, uint
rp_addr.s_addr = rp->addr;
if (PIM_DEBUG_AUTORP)
- zlog_debug("%s: Parsing RP %pI4 (grpcnt=%u)", __func__,
- (in_addr_t *)&rp->addr, rp->grpcnt);
+ zlog_debug("%s: Parsing RP %pPA (grpcnt=%u)", __func__, &rp_addr,
+ rp->grpcnt);
/* Ignore RP's limited to PIM version 1 or with an unknown version */
if (rp->pimver == AUTORP_PIM_V1 || rp->pimver == AUTORP_PIM_VUNKNOWN) {
if (PIM_DEBUG_AUTORP)
zlog_debug("%s: Ignoring unsupported PIM version in AutoRP Discovery for RP %pI4",
- __func__, (in_addr_t *)&(rp->addr));
+ __func__, &rp_addr);
/* Update the offset to skip past the groups advertised for this RP */
offset += (AUTORP_GRPLEN * rp->grpcnt);
continue;
@@ -781,17 +810,16 @@ static bool autorp_recv_discovery(struct pim_autorp *autorp, uint8_t rpcnt, uint
if (rp->grpcnt == 0) {
/* No groups?? */
if (PIM_DEBUG_AUTORP)
- zlog_debug("%s: Discovery message has no groups for RP %pI4",
- __func__, (in_addr_t *)&(rp->addr));
+ zlog_debug("%s: Discovery message has no groups for RP %pPA",
+ __func__, &rp_addr);
continue;
}
/* Make sure there is enough buffer to parse all the groups */
if ((buf_size - offset) < (AUTORP_GRPLEN * rp->grpcnt)) {
if (PIM_DEBUG_AUTORP)
- zlog_debug("%s: Buffer underrun parsing groups for RP %pI4 (%u < %u)",
- __func__, (in_addr_t *)&(rp->addr),
- (uint32_t)(buf_size - offset),
+ zlog_debug("%s: Buffer underrun parsing groups for RP %pPA (%u < %u)",
+ __func__, &rp_addr, (uint32_t)(buf_size - offset),
(uint32_t)(AUTORP_GRPLEN * rp->grpcnt));
return false;
}
@@ -809,8 +837,7 @@ static bool autorp_recv_discovery(struct pim_autorp *autorp, uint8_t rpcnt, uint
if (PIM_DEBUG_AUTORP)
zlog_debug("%s: Parsing group %s%pFX for RP %pI4", __func__,
- (grp->negprefix ? "!" : ""), &grppfix,
- (in_addr_t *)&rp->addr);
+ (grp->negprefix ? "!" : ""), &grppfix, &rp_addr);
if (!pim_autorp_add_rp(autorp, rp_addr, grppfix, NULL, holdtime))
success = false;
@@ -856,9 +883,9 @@ static bool autorp_recv_discovery(struct pim_autorp *autorp, uint8_t rpcnt, uint
prefix_list_entry_update_finish(ple);
if (PIM_DEBUG_AUTORP)
- zlog_debug("%s: Parsing group %s%pFX for RP %pI4", __func__,
+ zlog_debug("%s: Parsing group %s%pFX for RP %pPA", __func__,
(grp->negprefix ? "!" : ""), &ple->prefix,
- (in_addr_t *)&rp->addr);
+ &rp_addr);
}
if (!pim_autorp_add_rp(autorp, rp_addr, grppfix, plname, holdtime))
@@ -964,6 +991,7 @@ err:
return;
}
+static void pim_autorp_new_announcement(struct pim_instance *pim);
static bool pim_autorp_socket_enable(struct pim_autorp *autorp)
{
int fd;
@@ -1006,9 +1034,19 @@ static bool pim_autorp_socket_enable(struct pim_autorp *autorp)
if (PIM_DEBUG_AUTORP)
zlog_debug("%s: AutoRP socket enabled (fd=%u)", __func__, fd);
+ if (autorp->do_discovery)
+ autorp_read_on(autorp);
+
+ if (autorp->send_rp_discovery)
+ autorp_send_discovery_on(autorp);
+
+ /* Try to build a new announcement to make sure the send timer is enabled */
+ pim_autorp_new_announcement(autorp->pim);
+
return true;
}
+static void autorp_announcement_off(struct pim_autorp *autorp);
static bool pim_autorp_socket_disable(struct pim_autorp *autorp)
{
/* Return early if socket is already disabled */
@@ -1022,6 +1060,8 @@ static bool pim_autorp_socket_disable(struct pim_autorp *autorp)
return false;
}
+ autorp_send_discovery_off(autorp);
+ autorp_announcement_off(autorp);
autorp_read_off(autorp);
autorp->sock = -1;
@@ -1058,8 +1098,7 @@ static void autorp_send_announcement(struct event *evt)
/* Only send on active interfaces with full pim enabled, non-passive
* and have a primary address set.
*/
- if (CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) && pim_ifp &&
- pim_ifp->pim_enable && !pim_ifp->pim_passive_enable &&
+ if (autorp_is_pim_interface(ifp) &&
!pim_addr_is_any(pim_ifp->primary_address)) {
if (setsockopt(autorp->sock, IPPROTO_IP, IP_MULTICAST_IF,
&(pim_ifp->primary_address),
@@ -1108,6 +1147,10 @@ static void autorp_announcement_off(struct pim_autorp *autorp)
if (PIM_DEBUG_AUTORP)
zlog_debug("%s: AutoRP announcement sending disabled", __func__);
event_cancel(&(autorp->announce_timer));
+
+ /* Close the socket if we need to */
+ if (pim_autorp_should_close(autorp) && !pim_autorp_socket_disable(autorp))
+ zlog_warn("%s: AutoRP failed to close socket", __func__);
}
/* Pack the groups of the RP
@@ -1279,8 +1322,20 @@ static void pim_autorp_new_announcement(struct pim_instance *pim)
autorp->announce_pkt_sz += sizeof(struct autorp_pkt_hdr);
/* Only turn on the announcement timer if we have a packet to send */
- if (autorp->announce_pkt_sz >= MIN_AUTORP_PKT_SZ)
+ if (autorp->announce_pkt_sz >= MIN_AUTORP_PKT_SZ) {
+ /* We are sending an announcement, but discovery could be off, so make sure the socket is open */
+ if (pim_autorp_should_enable_socket(autorp)) {
+ if (!pim_autorp_socket_enable(autorp)) {
+ zlog_err("%s: AutoRP failed to open socket", __func__);
+ return;
+ }
+ } else {
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: No PIM interfaces, not enabling socket", __func__);
+ return;
+ }
autorp_announcement_on(autorp);
+ }
}
void pim_autorp_prefix_list_update(struct pim_instance *pim, struct prefix_list *plist)
@@ -1453,10 +1508,15 @@ void pim_autorp_add_ifp(struct interface *ifp)
struct pim_interface *pim_ifp;
pim_ifp = ifp->info;
- if (CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) && pim_ifp && pim_ifp->pim_enable) {
+ if (autorp_is_pim_interface(ifp)) {
pim = pim_ifp->pim;
- if (pim && pim->autorp &&
- (pim->autorp->do_discovery || pim->autorp->send_rp_discovery)) {
+ if (pim && pim->autorp && !pim_autorp_should_close(pim->autorp)) {
+ /* Make sure the socket is open and ready */
+ if (!pim_autorp_socket_enable(pim->autorp)) {
+ zlog_err("%s: AutoRP failed to open socket", __func__);
+ return;
+ }
+
if (PIM_DEBUG_AUTORP)
zlog_debug("%s: Adding interface %s to AutoRP, joining AutoRP groups",
__func__, ifp->name);
@@ -1475,11 +1535,13 @@ void pim_autorp_rm_ifp(struct interface *ifp)
*/
struct pim_instance *pim;
struct pim_interface *pim_ifp;
+ struct pim_autorp *autorp = NULL;
pim_ifp = ifp->info;
if (CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) && pim_ifp) {
pim = pim_ifp->pim;
if (pim && pim->autorp) {
+ autorp = pim->autorp;
if (PIM_DEBUG_AUTORP)
zlog_debug("%s: Removing interface %s from AutoRP, leaving AutoRP groups",
__func__, ifp->name);
@@ -1488,6 +1550,11 @@ void pim_autorp_rm_ifp(struct interface *ifp)
safe_strerror(errno));
}
}
+
+ if (autorp != NULL && !pim_autorp_should_enable_socket(autorp)) {
+ /* Removed the last pim enabled interface, close the socket */
+ pim_autorp_socket_disable(autorp);
+ }
}
void pim_autorp_start_discovery(struct pim_instance *pim)
@@ -1500,8 +1567,14 @@ void pim_autorp_start_discovery(struct pim_instance *pim)
autorp->do_discovery = true;
/* Make sure the socket is open and ready */
- if (!pim_autorp_socket_enable(autorp)) {
- zlog_err("%s: AutoRP failed to open socket", __func__);
+ if (pim_autorp_should_enable_socket(autorp)) {
+ if (!pim_autorp_socket_enable(autorp)) {
+ zlog_err("%s: AutoRP failed to open socket", __func__);
+ return;
+ }
+ } else {
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: No PIM interfaces, not enabling socket", __func__);
return;
}
diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c
index 50fe543b23..845467755a 100644
--- a/pimd/pim_bsm.c
+++ b/pimd/pim_bsm.c
@@ -1562,8 +1562,7 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
(buf + PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN),
(buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN),
frag_tag)) {
- zlog_warn("BSM from %pPA failed to parse",
- (pim_addr *)&bshdr->bsr_addr.addr);
+ zlog_warn("BSM from %pPA failed to parse", &bsr_addr);
pim->bsm_dropped++;
return -1;
}
@@ -1802,7 +1801,7 @@ bool cand_addrsel_update(struct cand_addrsel *asel, struct vrf *vrf)
case CAND_ADDR_ANY:
is_any = true;
- /* fallthru */
+ fallthrough;
case CAND_ADDR_LO:
FOR_ALL_INTERFACES (vrf, ifp) {
if (!if_is_up(ifp))
diff --git a/pimd/pim_bsr_rpdb.c b/pimd/pim_bsr_rpdb.c
index 860009312d..03ceabd733 100644
--- a/pimd/pim_bsr_rpdb.c
+++ b/pimd/pim_bsr_rpdb.c
@@ -455,6 +455,8 @@ int pim_crp_process(struct interface *ifp, pim_sgaddr *src_dst, uint8_t *buf,
struct pim_interface *pim_ifp = NULL;
struct pim_instance *pim;
struct bsm_scope *scope;
+ size_t ngroups;
+ pim_addr rpaddr = {};
pim_ifp = ifp->info;
if (!pim_ifp) {
@@ -505,20 +507,19 @@ int pim_crp_process(struct interface *ifp, pim_sgaddr *src_dst, uint8_t *buf,
/* ignore trailing data */
(void)buf;
- size_t ngroups = crp_hdr->prefix_cnt;
+ ngroups = crp_hdr->prefix_cnt;
+ rpaddr = crp_hdr->rp_addr.addr;
if (remain < ngroups * sizeof(struct pim_encoded_group_ipv4)) {
if (PIM_DEBUG_BSM)
zlog_debug("truncated Candidate-RP advertisement for RP %pPA from %pPA (too short for %zu groups)",
- (pim_addr *)&crp_hdr->rp_addr.addr,
- &src_dst->src, ngroups);
+ &rpaddr, &src_dst->src, ngroups);
return -1;
}
if (PIM_DEBUG_BSM)
zlog_debug("Candidate-RP: %pPA, prio=%u (from %pPA, %zu groups)",
- (pim_addr *)&crp_hdr->rp_addr.addr, crp_hdr->rp_prio,
- &src_dst->src, ngroups);
+ (pim_addr *)&rpaddr, crp_hdr->rp_prio, &src_dst->src, ngroups);
struct bsr_crp_rp *rp, ref;
@@ -529,16 +530,14 @@ int pim_crp_process(struct interface *ifp, pim_sgaddr *src_dst, uint8_t *buf,
if (!rp) {
if (bsr_crp_rps_count(scope->ebsr_rps) >= bsr_max_rps) {
zlog_err("BSR: number of tracked Candidate RPs (%zu) exceeds DoS-protection limit (%zu), dropping advertisement for RP %pPA (packet source %pPA)",
- bsr_crp_rps_count(scope->ebsr_rps),
- bsr_max_rps, (pim_addr *)&crp_hdr->rp_addr.addr,
- &src_dst->src);
+ bsr_crp_rps_count(scope->ebsr_rps), bsr_max_rps,
+ (pim_addr *)&rpaddr, &src_dst->src);
return -1;
}
if (PIM_DEBUG_BSM)
zlog_debug("new Candidate-RP: %pPA (from %pPA)",
- (pim_addr *)&crp_hdr->rp_addr.addr,
- &src_dst->src);
+ (pim_addr *)&rpaddr, &src_dst->src);
rp = XCALLOC(MTYPE_PIM_BSR_CRP, sizeof(*rp));
rp->scope = scope;
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index f838c401e3..2dcea57051 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -3181,7 +3181,7 @@ DEFPY (clear_ip_mroute_count,
return clear_ip_mroute_count_command(vty, name);
}
-DEFPY(clear_ip_msdp_peer, clear_ip_msdp_peer_cmd,
+DEFPY_YANG(clear_ip_msdp_peer, clear_ip_msdp_peer_cmd,
"clear ip msdp peer A.B.C.D$peer [vrf WORD$vrfname]",
CLEAR_STR
IP_STR
@@ -3399,7 +3399,7 @@ DEFPY_NOSH (router_pim,
return CMD_SUCCESS;
}
-DEFPY (no_router_pim,
+DEFPY_YANG (no_router_pim,
no_router_pim_cmd,
"no router pim [vrf NAME]",
NO_STR
@@ -3424,7 +3424,7 @@ DEFPY (no_router_pim,
}
-DEFPY (pim_spt_switchover_infinity,
+DEFPY_YANG (pim_spt_switchover_infinity,
pim_spt_switchover_infinity_cmd,
"spt-switchover infinity-and-beyond",
"SPT-Switchover\n"
@@ -3632,7 +3632,7 @@ DEFPY_ATTR(no_ip_pim_spt_switchover_infinity_plist,
return ret;
}
-DEFPY (pim_register_accept_list,
+DEFPY_YANG (pim_register_accept_list,
pim_register_accept_list_cmd,
"[no] register-accept-list PREFIXLIST4_NAME$word",
NO_STR
@@ -4237,7 +4237,7 @@ DEFPY (no_ip_igmp_group_watermark,
return CMD_SUCCESS;
}
-DEFPY (pim_v6_secondary,
+DEFPY_YANG (pim_v6_secondary,
pim_v6_secondary_cmd,
"send-v6-secondary",
"Send v6 secondary addresses\n")
@@ -4297,7 +4297,7 @@ DEFPY_ATTR(ip_pim_v6_secondary,
return ret;
}
-DEFPY (no_pim_v6_secondary,
+DEFPY_YANG (no_pim_v6_secondary,
no_pim_v6_secondary_cmd,
"no send-v6-secondary",
NO_STR
@@ -4716,7 +4716,7 @@ DEFPY (pim_bsr_candidate_rp_group,
return pim_process_bsr_crp_grp_cmd(vty, group_str, no);
}
-DEFPY (pim_ssm_prefix_list,
+DEFPY_YANG (pim_ssm_prefix_list,
pim_ssm_prefix_list_cmd,
"ssm prefix-list PREFIXLIST4_NAME$plist",
"Source Specific Multicast\n"
@@ -4776,7 +4776,7 @@ DEFPY_ATTR(ip_pim_ssm_prefix_list,
return ret;
}
-DEFPY (no_pim_ssm_prefix_list,
+DEFPY_YANG (no_pim_ssm_prefix_list,
no_pim_ssm_prefix_list_cmd,
"no ssm prefix-list",
NO_STR
@@ -4836,7 +4836,7 @@ DEFPY_ATTR(no_ip_pim_ssm_prefix_list,
return ret;
}
-DEFPY (no_pim_ssm_prefix_list_name,
+DEFPY_YANG (no_pim_ssm_prefix_list_name,
no_pim_ssm_prefix_list_name_cmd,
"no ssm prefix-list PREFIXLIST4_NAME$plist",
NO_STR
@@ -5128,7 +5128,7 @@ DEFPY_ATTR(no_ip_pim_ssmpingd,
return ret;
}
-DEFPY (pim_ecmp,
+DEFPY_YANG (pim_ecmp,
pim_ecmp_cmd,
"ecmp",
"Enable PIM ECMP \n")
@@ -5183,7 +5183,7 @@ DEFPY_ATTR(ip_pim_ecmp,
return ret;
}
-DEFPY (no_pim_ecmp,
+DEFPY_YANG (no_pim_ecmp,
no_pim_ecmp_cmd,
"no ecmp",
NO_STR
@@ -5240,7 +5240,7 @@ DEFPY_ATTR(no_ip_pim_ecmp,
return ret;
}
-DEFPY (pim_ecmp_rebalance,
+DEFPY_YANG (pim_ecmp_rebalance,
pim_ecmp_rebalance_cmd,
"ecmp rebalance",
"Enable PIM ECMP \n"
@@ -5306,7 +5306,7 @@ DEFPY_ATTR(ip_pim_ecmp_rebalance,
return ret;
}
-DEFPY (no_pim_ecmp_rebalance,
+DEFPY_YANG (no_pim_ecmp_rebalance,
no_pim_ecmp_rebalance_cmd,
"no ecmp rebalance",
NO_STR
@@ -5368,7 +5368,7 @@ DEFPY_ATTR(no_ip_pim_ecmp_rebalance,
return ret;
}
-DEFUN (interface_ip_igmp,
+DEFUN_YANG (interface_ip_igmp,
interface_ip_igmp_cmd,
"ip igmp",
IP_STR
@@ -5380,7 +5380,7 @@ DEFUN (interface_ip_igmp,
"frr-routing:ipv4");
}
-DEFUN (interface_no_ip_igmp,
+DEFUN_YANG (interface_no_ip_igmp,
interface_no_ip_igmp_cmd,
"no ip igmp",
NO_STR
@@ -5464,7 +5464,7 @@ DEFPY_YANG (interface_ip_igmp_static_group,
(src_str ? src_str : "0.0.0.0"));
}
-DEFUN (interface_ip_igmp_query_interval,
+DEFUN_YANG (interface_ip_igmp_query_interval,
interface_ip_igmp_query_interval_cmd,
"ip igmp query-interval (1-65535)",
IP_STR
@@ -5494,7 +5494,7 @@ DEFUN (interface_ip_igmp_query_interval,
"frr-routing:ipv4");
}
-DEFUN (interface_no_ip_igmp_query_interval,
+DEFUN_YANG (interface_no_ip_igmp_query_interval,
interface_no_ip_igmp_query_interval_cmd,
"no ip igmp query-interval [(1-65535)]",
NO_STR
@@ -5509,7 +5509,7 @@ DEFUN (interface_no_ip_igmp_query_interval,
"frr-routing:ipv4");
}
-DEFUN (interface_ip_igmp_version,
+DEFUN_YANG (interface_ip_igmp_version,
interface_ip_igmp_version_cmd,
"ip igmp version (2-3)",
IP_STR
@@ -5526,7 +5526,7 @@ DEFUN (interface_ip_igmp_version,
"frr-routing:ipv4");
}
-DEFUN (interface_no_ip_igmp_version,
+DEFUN_YANG (interface_no_ip_igmp_version,
interface_no_ip_igmp_version_cmd,
"no ip igmp version (2-3)",
NO_STR
@@ -5541,7 +5541,7 @@ DEFUN (interface_no_ip_igmp_version,
"frr-routing:ipv4");
}
-DEFPY (interface_ip_igmp_query_max_response_time,
+DEFPY_YANG (interface_ip_igmp_query_max_response_time,
interface_ip_igmp_query_max_response_time_cmd,
"ip igmp query-max-response-time (1-65535)$qmrt",
IP_STR
@@ -5552,7 +5552,7 @@ DEFPY (interface_ip_igmp_query_max_response_time,
return gm_process_query_max_response_time_cmd(vty, qmrt_str);
}
-DEFUN (interface_no_ip_igmp_query_max_response_time,
+DEFUN_YANG (interface_no_ip_igmp_query_max_response_time,
interface_no_ip_igmp_query_max_response_time_cmd,
"no ip igmp query-max-response-time [(1-65535)]",
NO_STR
@@ -5564,7 +5564,7 @@ DEFUN (interface_no_ip_igmp_query_max_response_time,
return gm_process_no_query_max_response_time_cmd(vty);
}
-DEFUN_HIDDEN (interface_ip_igmp_query_max_response_time_dsec,
+DEFUN_YANG_HIDDEN (interface_ip_igmp_query_max_response_time_dsec,
interface_ip_igmp_query_max_response_time_dsec_cmd,
"ip igmp query-max-response-time-dsec (1-65535)",
IP_STR
@@ -5594,7 +5594,7 @@ DEFUN_HIDDEN (interface_ip_igmp_query_max_response_time_dsec,
"frr-routing:ipv4");
}
-DEFUN_HIDDEN (interface_no_ip_igmp_query_max_response_time_dsec,
+DEFUN_YANG_HIDDEN (interface_no_ip_igmp_query_max_response_time_dsec,
interface_no_ip_igmp_query_max_response_time_dsec_cmd,
"no ip igmp query-max-response-time-dsec [(1-65535)]",
NO_STR
@@ -5904,7 +5904,7 @@ DEFPY (interface_no_ip_pim,
}
/* boundaries */
-DEFUN(interface_ip_pim_boundary_oil,
+DEFUN_YANG(interface_ip_pim_boundary_oil,
interface_ip_pim_boundary_oil_cmd,
"ip multicast boundary oil WORD",
IP_STR
@@ -5916,7 +5916,7 @@ DEFUN(interface_ip_pim_boundary_oil,
return pim_process_ip_pim_boundary_oil_cmd(vty, argv[4]->arg);
}
-DEFUN(interface_no_ip_pim_boundary_oil,
+DEFUN_YANG(interface_no_ip_pim_boundary_oil,
interface_no_ip_pim_boundary_oil_cmd,
"no ip multicast boundary oil [WORD]",
NO_STR
@@ -6653,7 +6653,7 @@ DEFUN_NOSH (show_debugging_pim,
return CMD_SUCCESS;
}
-DEFUN (interface_pim_use_source,
+DEFUN_YANG (interface_pim_use_source,
interface_pim_use_source_cmd,
"ip pim use-source A.B.C.D",
IP_STR
@@ -6668,7 +6668,7 @@ DEFUN (interface_pim_use_source,
"frr-routing:ipv4");
}
-DEFUN (interface_no_pim_use_source,
+DEFUN_YANG (interface_no_pim_use_source,
interface_no_pim_use_source_cmd,
"no ip pim use-source [A.B.C.D]",
NO_STR
@@ -6684,7 +6684,7 @@ DEFUN (interface_no_pim_use_source,
"frr-routing:ipv4");
}
-DEFPY (ip_pim_bfd,
+DEFPY_YANG (ip_pim_bfd,
ip_pim_bfd_cmd,
"ip pim bfd [profile BFDPROF$prof]",
IP_STR
@@ -6717,7 +6717,7 @@ DEFPY (ip_pim_bfd,
"frr-routing:ipv4");
}
-DEFPY(no_ip_pim_bfd_profile, no_ip_pim_bfd_profile_cmd,
+DEFPY_YANG(no_ip_pim_bfd_profile, no_ip_pim_bfd_profile_cmd,
"no ip pim bfd profile [BFDPROF]",
NO_STR
IP_STR
@@ -6733,7 +6733,7 @@ DEFPY(no_ip_pim_bfd_profile, no_ip_pim_bfd_profile_cmd,
"frr-routing:ipv4");
}
-DEFUN (no_ip_pim_bfd,
+DEFUN_YANG (no_ip_pim_bfd,
no_ip_pim_bfd_cmd,
"no ip pim bfd",
NO_STR
@@ -6748,7 +6748,7 @@ DEFUN (no_ip_pim_bfd,
"frr-routing:ipv4");
}
-DEFUN (ip_pim_bsm,
+DEFUN_YANG (ip_pim_bsm,
ip_pim_bsm_cmd,
"ip pim bsm",
IP_STR
@@ -6757,7 +6757,7 @@ DEFUN (ip_pim_bsm,
{
return pim_process_bsm_cmd(vty);
}
-DEFUN (no_ip_pim_bsm,
+DEFUN_YANG (no_ip_pim_bsm,
no_ip_pim_bsm_cmd,
"no ip pim bsm",
NO_STR
@@ -6768,7 +6768,7 @@ DEFUN (no_ip_pim_bsm,
return pim_process_no_bsm_cmd(vty);
}
-DEFUN (ip_pim_ucast_bsm,
+DEFUN_YANG (ip_pim_ucast_bsm,
ip_pim_ucast_bsm_cmd,
"ip pim unicast-bsm",
IP_STR
@@ -6778,7 +6778,7 @@ DEFUN (ip_pim_ucast_bsm,
return pim_process_unicast_bsm_cmd(vty);
}
-DEFUN (no_ip_pim_ucast_bsm,
+DEFUN_YANG (no_ip_pim_ucast_bsm,
no_ip_pim_ucast_bsm_cmd,
"no ip pim unicast-bsm",
NO_STR
@@ -6790,7 +6790,7 @@ DEFUN (no_ip_pim_ucast_bsm,
}
#if HAVE_BFDD > 0
-DEFUN_HIDDEN (
+DEFUN_YANG_HIDDEN (
ip_pim_bfd_param,
ip_pim_bfd_param_cmd,
"ip pim bfd (2-255) (1-65535) (1-65535)",
@@ -6801,7 +6801,7 @@ DEFUN_HIDDEN (
"Required min receive interval\n"
"Desired min transmit interval\n")
#else
- DEFUN(
+ DEFUN_YANG(
ip_pim_bfd_param,
ip_pim_bfd_param_cmd,
"ip pim bfd (2-255) (1-65535) (1-65535)",
@@ -6855,7 +6855,7 @@ ALIAS(no_ip_pim_bfd, no_ip_pim_bfd_param_cmd,
"Desired min transmit interval\n")
#endif /* !HAVE_BFDD */
-DEFPY(pim_msdp_peer, pim_msdp_peer_cmd,
+DEFPY_YANG(pim_msdp_peer, pim_msdp_peer_cmd,
"msdp peer A.B.C.D$peer source A.B.C.D$source",
CFG_MSDP_STR
"Configure MSDP peer\n"
@@ -6920,7 +6920,7 @@ DEFPY_ATTR(ip_pim_msdp_peer,
return ret;
}
-DEFPY(msdp_peer_md5, msdp_peer_md5_cmd,
+DEFPY_YANG(msdp_peer_md5, msdp_peer_md5_cmd,
"msdp peer A.B.C.D$peer password WORD$psk",
CFG_MSDP_STR
"Configure MSDP peer\n"
@@ -6945,7 +6945,7 @@ DEFPY(msdp_peer_md5, msdp_peer_md5_cmd,
return nb_cli_apply_changes(vty, "%s", xpath);
}
-DEFPY(no_msdp_peer_md5, no_msdp_peer_md5_cmd,
+DEFPY_YANG(no_msdp_peer_md5, no_msdp_peer_md5_cmd,
"no msdp peer A.B.C.D$peer password [WORD]",
NO_STR
CFG_MSDP_STR
@@ -6971,7 +6971,7 @@ DEFPY(no_msdp_peer_md5, no_msdp_peer_md5_cmd,
return nb_cli_apply_changes(vty, "%s", xpath);
}
-DEFPY(pim_msdp_timers, pim_msdp_timers_cmd,
+DEFPY_YANG(pim_msdp_timers, pim_msdp_timers_cmd,
"msdp timers (1-65535)$keepalive (1-65535)$holdtime [(1-65535)$connretry]",
CFG_MSDP_STR
"MSDP timers configuration\n"
@@ -7046,7 +7046,7 @@ DEFPY_ATTR(ip_pim_msdp_timers,
return ret;
}
-DEFPY(no_pim_msdp_timers, no_pim_msdp_timers_cmd,
+DEFPY_YANG(no_pim_msdp_timers, no_pim_msdp_timers_cmd,
"no msdp timers [(1-65535) (1-65535) [(1-65535)]]",
NO_STR
CFG_MSDP_STR
@@ -7110,7 +7110,7 @@ DEFPY_ATTR(no_ip_pim_msdp_timers,
return ret;
}
-DEFPY (no_pim_msdp_peer,
+DEFPY_YANG (no_pim_msdp_peer,
no_pim_msdp_peer_cmd,
"no msdp peer A.B.C.D",
NO_STR
@@ -7172,7 +7172,7 @@ DEFPY_ATTR(no_ip_pim_msdp_peer,
return ret;
}
-DEFPY(msdp_peer_sa_filter, msdp_peer_sa_filter_cmd,
+DEFPY_YANG(msdp_peer_sa_filter, msdp_peer_sa_filter_cmd,
"msdp peer A.B.C.D$peer sa-filter ACL_NAME$acl_name <in|out>$dir",
CFG_MSDP_STR
"Configure MSDP peer\n"
@@ -7203,7 +7203,7 @@ DEFPY(msdp_peer_sa_filter, msdp_peer_sa_filter_cmd,
return nb_cli_apply_changes(vty, "%s", xpath);
}
-DEFPY(no_msdp_peer_sa_filter, no_ip_msdp_peer_sa_filter_cmd,
+DEFPY_YANG(no_msdp_peer_sa_filter, no_ip_msdp_peer_sa_filter_cmd,
"no msdp peer A.B.C.D$peer sa-filter ACL_NAME <in|out>$dir",
NO_STR
CFG_MSDP_STR
@@ -7235,7 +7235,7 @@ DEFPY(no_msdp_peer_sa_filter, no_ip_msdp_peer_sa_filter_cmd,
return nb_cli_apply_changes(vty, "%s", xpath);
}
-DEFPY(pim_msdp_mesh_group_member,
+DEFPY_YANG(pim_msdp_mesh_group_member,
pim_msdp_mesh_group_member_cmd,
"msdp mesh-group WORD$gname member A.B.C.D$maddr",
CFG_MSDP_STR
@@ -7313,7 +7313,7 @@ DEFPY_ATTR(ip_pim_msdp_mesh_group_member,
return ret;
}
-DEFPY(no_pim_msdp_mesh_group_member,
+DEFPY_YANG(no_pim_msdp_mesh_group_member,
no_pim_msdp_mesh_group_member_cmd,
"no msdp mesh-group WORD$gname member A.B.C.D$maddr",
NO_STR
@@ -7432,7 +7432,7 @@ DEFPY_ATTR(no_ip_pim_msdp_mesh_group_member,
return ret;
}
-DEFPY(pim_msdp_mesh_group_source,
+DEFPY_YANG(pim_msdp_mesh_group_source,
pim_msdp_mesh_group_source_cmd,
"msdp mesh-group WORD$gname source A.B.C.D$saddr",
CFG_MSDP_STR
@@ -7505,7 +7505,7 @@ DEFPY_ATTR(ip_pim_msdp_mesh_group_source,
return ret;
}
-DEFPY(no_pim_msdp_mesh_group_source,
+DEFPY_YANG(no_pim_msdp_mesh_group_source,
no_pim_msdp_mesh_group_source_cmd,
"no msdp mesh-group WORD$gname source [A.B.C.D]",
NO_STR
@@ -7593,7 +7593,7 @@ DEFPY_ATTR(no_ip_pim_msdp_mesh_group_source,
return ret;
}
-DEFPY(no_pim_msdp_mesh_group,
+DEFPY_YANG(no_pim_msdp_mesh_group,
no_pim_msdp_mesh_group_cmd,
"no msdp mesh-group WORD$gname",
NO_STR
@@ -7661,7 +7661,7 @@ DEFPY_ATTR(no_ip_pim_msdp_mesh_group,
return ret;
}
-DEFPY(msdp_shutdown,
+DEFPY_YANG(msdp_shutdown,
msdp_shutdown_cmd,
"[no] msdp shutdown",
NO_STR
@@ -7679,7 +7679,7 @@ DEFPY(msdp_shutdown,
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY(msdp_peer_sa_limit, msdp_peer_sa_limit_cmd,
+DEFPY_YANG(msdp_peer_sa_limit, msdp_peer_sa_limit_cmd,
"[no] msdp peer A.B.C.D$peer sa-limit ![(1-4294967294)$sa_limit]",
NO_STR
CFG_MSDP_STR
@@ -7702,7 +7702,7 @@ DEFPY(msdp_peer_sa_limit, msdp_peer_sa_limit_cmd,
return nb_cli_apply_changes(vty, "%s", xpath);
}
-DEFPY(msdp_originator_id, msdp_originator_id_cmd,
+DEFPY_YANG(msdp_originator_id, msdp_originator_id_cmd,
"[no] msdp originator-id ![A.B.C.D$originator_id]",
NO_STR
CFG_MSDP_STR
@@ -8448,7 +8448,7 @@ DEFUN (show_ip_msdp_sa_sg_vrf_all,
return CMD_SUCCESS;
}
-DEFPY(msdp_log_neighbor_changes, msdp_log_neighbor_changes_cmd,
+DEFPY_YANG(msdp_log_neighbor_changes, msdp_log_neighbor_changes_cmd,
"[no] msdp log neighbor-events",
NO_STR
MSDP_STR
@@ -8463,7 +8463,7 @@ DEFPY(msdp_log_neighbor_changes, msdp_log_neighbor_changes_cmd,
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY(msdp_log_sa_changes, msdp_log_sa_changes_cmd,
+DEFPY_YANG(msdp_log_sa_changes, msdp_log_sa_changes_cmd,
"[no] msdp log sa-events",
NO_STR
MSDP_STR
@@ -8751,7 +8751,7 @@ DEFUN_HIDDEN (show_ip_pim_vxlan_sg_work,
return CMD_SUCCESS;
}
-DEFPY_HIDDEN (no_pim_mlag,
+DEFPY_YANG_HIDDEN (no_pim_mlag,
no_pim_mlag_cmd,
"no mlag",
NO_STR
@@ -8808,7 +8808,7 @@ DEFPY_ATTR(no_ip_pim_mlag,
return ret;
}
-DEFPY_HIDDEN (pim_mlag,
+DEFPY_YANG_HIDDEN (pim_mlag,
pim_mlag_cmd,
"mlag INTERFACE$iface role [primary|secondary]$role state [up|down]$state addr A.B.C.D$addr",
"MLAG\n"
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index 86dc826378..510ca398ff 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -2025,12 +2025,12 @@ void pim_pim_interface_delete(struct interface *ifp)
if (!pim_ifp)
return;
+ pim_ifp->pim_enable = false;
+
#if PIM_IPV == 4
pim_autorp_rm_ifp(ifp);
#endif
- pim_ifp->pim_enable = false;
-
pim_if_membership_clear(ifp);
/*
diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c
index 1be5e9cb88..65fcd6ec51 100644
--- a/pimd/pim_nb_config.c
+++ b/pimd/pim_nb_config.c
@@ -3285,7 +3285,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
{
struct vrf *vrf;
struct pim_instance *pim;
- bool enabled;
switch (args->event) {
case NB_EV_VALIDATE:
@@ -3295,10 +3294,8 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
case NB_EV_APPLY:
vrf = nb_running_get_entry(args->dnode, NULL, true);
pim = vrf->info;
- enabled = yang_dnode_get_bool(args->dnode, NULL);
/* Run AutoRP discovery by default */
- if (!enabled)
- pim_autorp_start_discovery(pim);
+ pim_autorp_start_discovery(pim);
break;
}
diff --git a/staticd/static_vty.c b/staticd/static_vty.c
index 6fc4f067d9..087df8efa0 100644
--- a/staticd/static_vty.c
+++ b/staticd/static_vty.c
@@ -86,6 +86,7 @@ static int static_route_nb_run(struct vty *vty, struct static_route_args *args)
uint8_t segs_stack_id = 0;
char *orig_label = NULL, *orig_seg = NULL;
const char *buf_gate_str;
+ struct ipaddr gate_ip;
uint8_t distance = ZEBRA_STATIC_DISTANCE_DEFAULT;
route_tag_t tag = 0;
uint32_t table_id = 0;
@@ -149,22 +150,27 @@ static int static_route_nb_run(struct vty *vty, struct static_route_args *args)
if (src.prefixlen)
prefix2str(&src, buf_src_prefix, sizeof(buf_src_prefix));
- if (args->gateway)
+
+ if (args->gateway) {
buf_gate_str = args->gateway;
- else
+ if (str2ipaddr(args->gateway, &gate_ip) != 0) {
+ vty_out(vty, "%% Invalid gateway address %s\n", args->gateway);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ } else
buf_gate_str = "";
if (args->gateway == NULL && args->interface_name == NULL)
type = STATIC_BLACKHOLE;
else if (args->gateway && args->interface_name) {
- if (args->afi == AFI_IP)
+ if (gate_ip.ipa_type == IPADDR_V4)
type = STATIC_IPV4_GATEWAY_IFNAME;
else
type = STATIC_IPV6_GATEWAY_IFNAME;
} else if (args->interface_name)
type = STATIC_IFNAME;
else {
- if (args->afi == AFI_IP)
+ if (gate_ip.ipa_type == IPADDR_V4)
type = STATIC_IPV4_GATEWAY;
else
type = STATIC_IPV6_GATEWAY;
@@ -552,7 +558,7 @@ DEFPY_YANG(ip_route_address_interface,
ip_route_address_interface_cmd,
"[no] ip route\
<A.B.C.D/M$prefix|A.B.C.D$prefix A.B.C.D$mask> \
- A.B.C.D$gate \
+ <A.B.C.D|X:X::X:X>$gate \
<INTERFACE|Null0>$ifname \
[{ \
tag (1-4294967295) \
@@ -571,7 +577,8 @@ DEFPY_YANG(ip_route_address_interface,
"IP destination prefix (e.g. 10.0.0.0/8)\n"
"IP destination prefix\n"
"IP destination prefix mask\n"
- "IP gateway address\n"
+ "IPv4 gateway address\n"
+ "IPv6 gateway address\n"
"IP gateway interface name\n"
"Null interface\n"
"Set tag for this route\n"
@@ -624,7 +631,7 @@ DEFPY_YANG(ip_route_address_interface_vrf,
ip_route_address_interface_vrf_cmd,
"[no] ip route\
<A.B.C.D/M$prefix|A.B.C.D$prefix A.B.C.D$mask> \
- A.B.C.D$gate \
+ <A.B.C.D|X:X::X:X>$gate \
<INTERFACE|Null0>$ifname \
[{ \
tag (1-4294967295) \
@@ -642,7 +649,8 @@ DEFPY_YANG(ip_route_address_interface_vrf,
"IP destination prefix (e.g. 10.0.0.0/8)\n"
"IP destination prefix\n"
"IP destination prefix mask\n"
- "IP gateway address\n"
+ "IPv4 gateway address\n"
+ "IPv6 gateway address\n"
"IP gateway interface name\n"
"Null interface\n"
"Set tag for this route\n"
@@ -693,16 +701,16 @@ DEFPY_YANG(ip_route_address_interface_vrf,
DEFPY_YANG(ip_route,
ip_route_cmd,
"[no] ip route\
- <A.B.C.D/M$prefix|A.B.C.D$prefix A.B.C.D$mask> \
- <A.B.C.D$gate|<INTERFACE|Null0>$ifname> \
- [{ \
- tag (1-4294967295) \
- |(1-255)$distance \
- |vrf NAME \
- |label WORD \
- |table (1-4294967295) \
- |nexthop-vrf NAME \
- |color (1-4294967295) \
+ <A.B.C.D/M$prefix|A.B.C.D$prefix A.B.C.D$mask> \
+ <<A.B.C.D|X:X::X:X>$gate|<INTERFACE|Null0>$ifname> \
+ [{ \
+ tag (1-4294967295) \
+ |(1-255)$distance \
+ |vrf NAME \
+ |label WORD \
+ |table (1-4294967295) \
+ |nexthop-vrf NAME \
+ |color (1-4294967295) \
|bfd$bfd [{multi-hop$bfd_multi_hop|source A.B.C.D$bfd_source|profile BFDPROF$bfd_profile}] \
|segments WORD \
}]",
@@ -711,7 +719,8 @@ DEFPY_YANG(ip_route,
"IP destination prefix (e.g. 10.0.0.0/8)\n"
"IP destination prefix\n"
"IP destination prefix mask\n"
- "IP gateway address\n"
+ "IPv4 gateway address\n"
+ "IPv6 gateway address\n"
"IP gateway interface name\n"
"Null interface\n"
"Set tag for this route\n"
@@ -761,15 +770,15 @@ DEFPY_YANG(ip_route,
DEFPY_YANG(ip_route_vrf,
ip_route_vrf_cmd,
"[no] ip route\
- <A.B.C.D/M$prefix|A.B.C.D$prefix A.B.C.D$mask> \
- <A.B.C.D$gate|<INTERFACE|Null0>$ifname> \
- [{ \
- tag (1-4294967295) \
- |(1-255)$distance \
- |label WORD \
- |table (1-4294967295) \
- |nexthop-vrf NAME \
- |color (1-4294967295) \
+ <A.B.C.D/M$prefix|A.B.C.D$prefix A.B.C.D$mask> \
+ <<A.B.C.D|X:X::X:X>$gate|<INTERFACE|Null0>$ifname> \
+ [{ \
+ tag (1-4294967295) \
+ |(1-255)$distance \
+ |label WORD \
+ |table (1-4294967295) \
+ |nexthop-vrf NAME \
+ |color (1-4294967295) \
|bfd$bfd [{multi-hop$bfd_multi_hop|source A.B.C.D$bfd_source|profile BFDPROF$bfd_profile}] \
|segments WORD \
}]",
@@ -778,7 +787,8 @@ DEFPY_YANG(ip_route_vrf,
"IP destination prefix (e.g. 10.0.0.0/8)\n"
"IP destination prefix\n"
"IP destination prefix mask\n"
- "IP gateway address\n"
+ "IPv4 gateway address\n"
+ "IPv6 gateway address\n"
"IP gateway interface name\n"
"Null interface\n"
"Set tag for this route\n"
diff --git a/staticd/static_zebra.c b/staticd/static_zebra.c
index f02fe1e95c..cc09b42836 100644
--- a/staticd/static_zebra.c
+++ b/staticd/static_zebra.c
@@ -209,14 +209,10 @@ static void static_zebra_nexthop_update(struct vrf *vrf, struct prefix *matched,
struct zapi_route *nhr)
{
struct static_nht_data *nhtd, lookup;
- afi_t afi = AFI_IP;
if (static_zclient->bfd_integration)
bfd_nht_update(matched, nhr);
- if (matched->family == AF_INET6)
- afi = AFI_IP6;
-
if (nhr->type == ZEBRA_ROUTE_CONNECT) {
if (static_nexthop_is_local(vrf->vrf_id, matched,
nhr->prefix.family))
@@ -233,8 +229,12 @@ static void static_zebra_nexthop_update(struct vrf *vrf, struct prefix *matched,
if (nhtd) {
nhtd->nh_num = nhr->nexthop_num;
- static_nht_reset_start(matched, afi, nhr->safi, nhtd->nh_vrf_id);
- static_nht_update(NULL, NULL, matched, nhr->nexthop_num, afi, nhr->safi,
+ /* The tracked nexthop might be used by IPv4 and IPv6 routes */
+ static_nht_reset_start(matched, AFI_IP, nhr->safi, nhtd->nh_vrf_id);
+ static_nht_update(NULL, NULL, matched, nhr->nexthop_num, AFI_IP, nhr->safi,
+ nhtd->nh_vrf_id);
+ static_nht_reset_start(matched, AFI_IP6, nhr->safi, nhtd->nh_vrf_id);
+ static_nht_update(NULL, NULL, matched, nhr->nexthop_num, AFI_IP6, nhr->safi,
nhtd->nh_vrf_id);
} else
zlog_err("No nhtd?");
@@ -361,7 +361,7 @@ void static_zebra_nht_register(struct static_nexthop *nh, bool reg)
if (reg) {
if (nhtd->nh_num) {
/* refresh with existing data */
- afi_t afi = prefix_afi(&lookup.nh);
+ afi_t afi = prefix_afi(&rn->p);
if (nh->state == STATIC_NOT_INSTALLED ||
nh->state == STATIC_SENT_TO_ZEBRA)
diff --git a/tests/.gitignore b/tests/.gitignore
index 681438f4a5..51909cd81d 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -22,6 +22,7 @@ frr_northbound*
/lib/cli/test_commands
/lib/cli/test_commands_defun.c
/lib/northbound/test_oper_data
+/lib/northbound/test_oper_exists
/lib/cxxcompat
/lib/fuzz_zlog
/lib/test_assert
diff --git a/tests/lib/northbound/test_oper_data.c b/tests/lib/northbound/test_oper_data.c
index a38325173a..7a3618f3bf 100644
--- a/tests/lib/northbound/test_oper_data.c
+++ b/tests/lib/northbound/test_oper_data.c
@@ -249,9 +249,10 @@ static enum nb_error frr_test_module_c2cont_c2value_get(const struct nb_node *nb
struct lyd_node *parent)
{
const struct lysc_node *snode = nb_node->snode;
- uint32_t value = 0xAB010203;
+ uint32_t value = htole32(0xAB010203);
LY_ERR err;
+	/* Note that this API expects 'value' to be in little-endian form */
err = lyd_new_term_bin(parent, snode->module, snode->name, &value, sizeof(value),
LYD_NEW_PATH_UPDATE, NULL);
assert(err == LY_SUCCESS);
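
The test change above passes the integer to lyd_new_term_bin() in little-endian byte order via htole32(). A tiny standalone sketch of that conversion, assuming a glibc/Linux environment where htole32() comes from <endian.h>:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t host = 0xAB010203;
	uint32_t le = htole32(host); /* no-op on little-endian hosts */
	const uint8_t *p = (const uint8_t *)&le;

	/* The first byte in memory is now the least-significant one: 03 02 01 ab */
	printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
	return 0;
}
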
diff --git a/tests/topotests/bgp_rpki_topo1/test_bgp_rpki_topo1.py b/tests/topotests/bgp_rpki_topo1/test_bgp_rpki_topo1.py
index 5b775aa6cb..bdf905feba 100644
--- a/tests/topotests/bgp_rpki_topo1/test_bgp_rpki_topo1.py
+++ b/tests/topotests/bgp_rpki_topo1/test_bgp_rpki_topo1.py
@@ -477,6 +477,121 @@ def test_bgp_ecommunity_rpki():
assert result is None, "Received RPKI extended community"
+def test_show_bgp_rpki_as_number():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ["r1", "r3"]:
+ logger.info("{}: checking if rtrd is running".format(rname))
+ if rtrd_process[rname].poll() is not None:
+ pytest.skip(tgen.errors)
+
+ step("Check RPKI prefixes for ASN 65531")
+
+ rname = "r2"
+ output = json.loads(tgen.gears[rname].vtysh_cmd("show rpki as-number 65531 json"))
+
+ # Expected output should show no prefixes for this ASN
+ expected = {"ipv4PrefixCount": 0, "ipv6PrefixCount": 0, "prefixes": []}
+
+ assert output == expected, "Found unexpected RPKI prefixes for ASN 65531"
+
+
+def test_show_bgp_rpki_as_number_65530():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ["r1", "r3"]:
+ logger.info("{}: checking if rtrd is running".format(rname))
+ if rtrd_process[rname].poll() is not None:
+ pytest.skip(tgen.errors)
+
+ step("Check RPKI prefixes for ASN 65530")
+
+ rname = "r2"
+ output = json.loads(tgen.gears[rname].vtysh_cmd("show rpki as-number 65530 json"))
+
+ expected = {
+ "prefixes": [
+ {
+ "prefix": "198.51.100.0",
+ "prefixLenMin": 24,
+ "prefixLenMax": 24,
+ "asn": 65530,
+ },
+ {
+ "prefix": "203.0.113.0",
+ "prefixLenMin": 24,
+ "prefixLenMax": 24,
+ "asn": 65530,
+ },
+ ],
+ "ipv4PrefixCount": 2,
+ "ipv6PrefixCount": 0,
+ }
+
+ assert (
+ output == expected
+ ), "RPKI prefixes for ASN 65530 do not match expected output"
+
+
+def test_rpki_stop_and_check_connection():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ["r1", "r3"]:
+ logger.info("{}: checking if rtrd is running".format(rname))
+ if rtrd_process[rname].poll() is not None:
+ pytest.skip(tgen.errors)
+
+ step("Stop RPKI on r2")
+ rname = "r2"
+ tgen.gears[rname].vtysh_cmd("rpki stop")
+
+ step("Check RPKI cache connection status")
+ output = json.loads(tgen.gears[rname].vtysh_cmd("show rpki cache-connection json"))
+
+ expected = {"error": "No connection to RPKI cache server."}
+ assert (
+ output == expected
+ ), "RPKI cache connection status does not show as disconnected"
+
+
+def test_rpki_start_and_check_connection():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ["r1", "r3"]:
+ logger.info("{}: checking if rtrd is running".format(rname))
+ if rtrd_process[rname].poll() is not None:
+ pytest.skip(tgen.errors)
+
+ step("Start RPKI on r2")
+ rname = "r2"
+ tgen.gears[rname].vtysh_cmd("rpki start")
+
+ def _check_rpki_connection():
+ output = json.loads(
+ tgen.gears[rname].vtysh_cmd("show rpki cache-connection json")
+ )
+ # We expect to see a connected group and at least one connection
+ return "connectedGroup" in output and "connections" in output
+
+ step("Check RPKI cache connection status")
+ _, result = topotest.run_and_expect(
+ _check_rpki_connection, True, count=60, wait=0.5
+ )
+ assert result, "RPKI cache connection did not establish after start"
+
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index 54142e8526..a19c61b19d 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -330,13 +330,12 @@ def create_common_configurations(
for router in routers:
fname = "{}/{}/{}".format(tgen.logdir, router, FRRCFG_FILE)
try:
- frr_cfg_fd = open(fname, mode)
- if config_type:
- frr_cfg_fd.write(config_map[config_type])
- for line in config_dict[router]:
- frr_cfg_fd.write("{} \n".format(str(line)))
- frr_cfg_fd.write("\n")
-
+ with open(fname, mode) as frr_cfg_fd:
+ if config_type:
+ frr_cfg_fd.write(config_map[config_type])
+ for line in config_dict[router]:
+ frr_cfg_fd.write("{} \n".format(str(line)))
+ frr_cfg_fd.write("\n")
except IOError as err:
logger.error("Unable to open FRR Config '%s': %s" % (fname, str(err)))
return False
@@ -487,12 +486,13 @@ def save_initial_config_on_routers(tgen):
procs = {}
for rname in router_list:
logger.debug("Fetching running config for router %s", rname)
- procs[rname] = router_list[rname].popen(
- ["/usr/bin/env", "vtysh", "-c", "show running-config no-header"],
- stdin=None,
- stdout=open(target_cfg_fmt.format(rname), "w"),
- stderr=subprocess.PIPE,
- )
+ with open(target_cfg_fmt.format(rname), "w") as target_cfg_fd:
+ procs[rname] = router_list[rname].popen(
+ ["/usr/bin/env", "vtysh", "-c", "show running-config no-header"],
+ stdin=None,
+ stdout=target_cfg_fd,
+ stderr=subprocess.PIPE,
+ )
for rname, p in procs.items():
_, error = p.communicate()
if p.returncode:
@@ -543,12 +543,13 @@ def reset_config_on_routers(tgen, routerName=None):
procs = {}
for rname in router_list:
logger.debug("Fetching running config for router %s", rname)
- procs[rname] = router_list[rname].popen(
- ["/usr/bin/env", "vtysh", "-c", "show running-config no-header"],
- stdin=None,
- stdout=open(run_cfg_fmt.format(rname, gen), "w"),
- stderr=subprocess.PIPE,
- )
+ with open(run_cfg_fmt.format(rname, gen), "w") as run_cfg_fd:
+ procs[rname] = router_list[rname].popen(
+ ["/usr/bin/env", "vtysh", "-c", "show running-config no-header"],
+ stdin=None,
+ stdout=run_cfg_fd,
+ stderr=subprocess.PIPE,
+ )
for rname, p in procs.items():
_, error = p.communicate()
if p.returncode:
@@ -567,19 +568,20 @@ def reset_config_on_routers(tgen, routerName=None):
logger.debug(
"Generating delta for router %s to new configuration (gen %d)", rname, gen
)
- procs[rname] = tgen.net.popen(
- [
- "/usr/lib/frr/frr-reload.py",
- "--test-reset",
- "--input",
- run_cfg_fmt.format(rname, gen),
- "--test",
- target_cfg_fmt.format(rname),
- ],
- stdin=None,
- stdout=open(delta_fmt.format(rname, gen), "w"),
- stderr=subprocess.PIPE,
- )
+ with open(delta_fmt.format(rname, gen), "w") as delta_fd:
+ procs[rname] = tgen.net.popen(
+ [
+ "/usr/lib/frr/frr-reload.py",
+ "--test-reset",
+ "--input",
+ run_cfg_fmt.format(rname, gen),
+ "--test",
+ target_cfg_fmt.format(rname),
+ ],
+ stdin=None,
+ stdout=delta_fd,
+ stderr=subprocess.PIPE,
+ )
for rname, p in procs.items():
_, error = p.communicate()
if p.returncode:
diff --git a/tests/topotests/static_simple/test_static_simple.py b/tests/topotests/static_simple/test_static_simple.py
index afde58fbf7..615d1621f3 100644
--- a/tests/topotests/static_simple/test_static_simple.py
+++ b/tests/topotests/static_simple/test_static_simple.py
@@ -111,6 +111,7 @@ def do_config_inner(
count,
add=True,
do_ipv6=False,
+ do_ipv6_nexthop=False,
do_sadr=False,
via=None,
vrf=None,
@@ -129,6 +130,8 @@ def do_config_inner(
src_prefs = ["2001:db8:1111::/48", "2001:db8:2222::/48"]
elif do_ipv6:
super_prefs = ["2001::/48", "2002::/48"]
+ elif do_ipv6_nexthop:
+ super_prefs = ["11.0.0.0/8", "21.0.0.0/8"]
else:
super_prefs = ["10.0.0.0/8", "20.0.0.0/8"]
@@ -142,11 +145,19 @@ def do_config_inner(
matchvia = f"dev {via}"
else:
if vrf:
- via = "2102::2" if do_ipv6 else "102.0.0.2"
- matchvia = f"via {via} dev r1-eth1"
+ via = "2102::2" if do_ipv6 or do_ipv6_nexthop else "102.0.0.2"
+ matchvia = (
+ f"via inet6 {via} dev r1-eth1"
+ if not do_ipv6 and do_ipv6_nexthop
+ else f"via {via} dev r1-eth1"
+ )
else:
- via = "2101::2" if do_ipv6 else "101.0.0.2"
- matchvia = f"via {via} dev r1-eth0"
+ via = "2101::2" if do_ipv6 or do_ipv6_nexthop else "101.0.0.2"
+ matchvia = (
+ f"via inet6 {via} dev r1-eth0"
+ if not do_ipv6 and do_ipv6_nexthop
+ else f"via {via} dev r1-eth0"
+ )
vrfdbg = " in vrf {}".format(vrf) if vrf else ""
logger.debug("{} {} static {} routes{}".format(optype, count, iptype, vrfdbg))
@@ -201,6 +212,7 @@ def do_config_inner(
def do_config(*args, **kwargs):
do_config_inner(*args, do_ipv6=False, do_sadr=False, **kwargs)
+ do_config_inner(*args, do_ipv6=False, do_ipv6_nexthop=True, **kwargs)
do_config_inner(*args, do_ipv6=True, do_sadr=False, **kwargs)
do_config_inner(*args, do_ipv6=True, do_sadr=True, **kwargs)
diff --git a/yang/frr-pathd.yang b/yang/frr-pathd.yang
index 5beda769c1..96eafda9d4 100644
--- a/yang/frr-pathd.yang
+++ b/yang/frr-pathd.yang
@@ -6,15 +6,9 @@ module frr-pathd {
import ietf-inet-types {
prefix inet;
}
- import ietf-yang-types {
- prefix yang;
- }
import ietf-routing-types {
prefix rt-types;
}
- import frr-interface {
- prefix frr-interface;
- }
organization
"Free Range Routing";
@@ -27,11 +21,10 @@ module frr-pathd {
revision 2018-11-06 {
description
"Initial revision.";
+ reference "FRRouting";
}
typedef protocol-origin-type {
- description
- "Indication for the protocol origin of an object.";
type enumeration {
enum pcep {
value 1;
@@ -46,6 +39,8 @@ module frr-pathd {
description "The object was created through CLI, Yang model via Netconf, gRPC, etc";
}
}
+ description
+ "Indication for the protocol origin of an object.";
}
typedef originator-type {
@@ -57,7 +52,13 @@ module frr-pathd {
}
container pathd {
+ description
+ "Path properties for Segment Routing TE";
+
container srte {
+ description
+ "Segment Routing TE properties";
+
list segment-list {
key "name";
description "Segment-list properties";
@@ -91,9 +92,10 @@ module frr-pathd {
}
container nai {
presence "The segment has a Node or Adjacency Identifier";
+ description
+ "Node or Adjacency Identifier for the segment";
+
leaf type {
- description "NAI type";
- mandatory true;
type enumeration {
enum ipv4_node {
value 1;
@@ -132,42 +134,59 @@ module frr-pathd {
description "IPv6 prefix with optional algorithm";
}
}
+ mandatory true;
+ description "NAI type";
}
leaf local-address {
type inet:ip-address;
mandatory true;
+ description
+ "Local address of the NAI";
}
leaf local-prefix-len {
+ when "../type = 'ipv4_local_iface' or ../type = 'ipv6_local_iface' or ../type = 'ipv4_algo' or ../type = 'ipv6_algo'";
type uint8;
mandatory true;
- when "../type = 'ipv4_local_iface' or ../type = 'ipv6_local_iface' or ../type = 'ipv4_algo' or ../type = 'ipv6_algo'";
+ description
+ "Prefix length of the local address";
}
leaf local-interface {
+ when "../type = 'ipv4_local_iface' or ../type = 'ipv6_local_iface' or ../type = 'ipv4_unnumbered_adjacency'";
type uint32;
mandatory true;
- when "../type = 'ipv4_local_iface' or ../type = 'ipv6_local_iface' or ../type = 'ipv4_unnumbered_adjacency'";
+ description
+ "Local interface ID for the NAI";
}
leaf remote-address {
+ when "../type = 'ipv4_adjacency' or ../type = 'ipv6_adjacency' or ../type = 'ipv4_unnumbered_adjacency'";
type inet:ip-address;
- mandatory true;
- when "../type = 'ipv4_adjacency' or ../type = 'ipv6_adjacency' or ../type = 'ipv4_unnumbered_adjacency'";
- }
- leaf remote-interface {
- type uint32;
- mandatory true;
- when "../type = 'ipv4_unnumbered_adjacency'";
- }
+ mandatory true;
+ description
+ "Remote address of the NAI";
+ }
+ leaf remote-interface {
+ when "../type = 'ipv4_unnumbered_adjacency'";
+ type uint32;
+ mandatory true;
+ description
+ "Remote interface ID for the NAI";
+ }
leaf algorithm {
+ when "../type = 'ipv4_algo' or ../type = 'ipv6_algo'";
type uint8;
mandatory true;
- when "../type = 'ipv4_algo' or ../type = 'ipv6_algo'";
- }
+ description
+ "Algorithm to use for the NAI";
}
+ }
}
}
list policy {
key "color endpoint";
unique "name";
+ description
+ "List of SR Policies.";
+
leaf color {
type uint32;
description
@@ -197,10 +216,10 @@ module frr-pathd {
"True if a valid candidate path of this policy is operational in zebra, False otherwise";
}
list candidate-path {
+ key "preference";
unique "name";
description
"List of Candidate Paths of the SR Policy.";
- key "preference";
leaf preference {
type uint32;
description
@@ -237,17 +256,21 @@ module frr-pathd {
description "Candidate path distinguisher";
}
leaf type {
- description
- "Type of the Candidate Path.";
- mandatory true;
type enumeration {
enum explicit {
value 1;
+ description
+ "Explicit path defined by a segment list";
}
enum dynamic {
value 2;
+ description
+ "Dynamic path computed by a routing protocol";
}
}
+ mandatory true;
+ description
+ "Type of the Candidate Path.";
}
leaf segment-list-name {
type leafref {
@@ -271,10 +294,12 @@ module frr-pathd {
"If the bandwidth limitation is a requirement or only a suggestion";
}
leaf value {
- mandatory true;
type decimal64 {
fraction-digits 6;
}
+ mandatory true;
+ description
+ "The bandwidth value for the candidate path.";
}
}
container affinity {
@@ -298,9 +323,10 @@ module frr-pathd {
}
list metrics {
key "type";
+ description
+ "This list contains the different metrics that can be used to describe a path.";
+
leaf type {
- description
- "Type of the metric.";
type enumeration {
enum igp {
value 1;
@@ -387,6 +413,8 @@ module frr-pathd {
description "Border Node Count metric";
}
}
+ description
+ "Type of the metric.";
}
leaf required {
type boolean;
@@ -405,10 +433,12 @@ module frr-pathd {
"Defines if the value has been generated by the originator of the path.";
}
leaf value {
- mandatory true;
type decimal64 {
fraction-digits 6;
}
+ mandatory true;
+ description
+ "Value of the metric.";
}
}
container objective-function {
@@ -422,9 +452,6 @@ module frr-pathd {
"If an objective function is a requirement, or if it is only a suggestion";
}
leaf type {
- description
- "Type of objective function.";
- mandatory true;
type enumeration {
enum mcp {
value 1;
@@ -495,6 +522,9 @@ module frr-pathd {
description "Minimize the number of Shared Nodes";
}
}
+ mandatory true;
+ description
+ "Type of objective function.";
}
}
}
diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c
index 9f26852d1f..116a697de9 100644
--- a/zebra/dplane_fpm_nl.c
+++ b/zebra/dplane_fpm_nl.c
@@ -587,6 +587,10 @@ static void fpm_read(struct event *t)
struct zebra_dplane_ctx *ctx;
size_t available_bytes;
size_t hdr_available_bytes;
+ struct dplane_ctx_list_head batch_list;
+
+ /* Initialize the batch list */
+ dplane_ctx_q_init(&batch_list);
/* Let's ignore the input at the moment. */
rv = stream_read_try(fnc->ibuf, fnc->socket,
@@ -627,7 +631,7 @@ static void fpm_read(struct event *t)
while (available_bytes) {
if (available_bytes < (ssize_t)FPM_MSG_HDR_LEN) {
stream_pulldown(fnc->ibuf);
- return;
+ goto send_batch;
}
fpm.version = stream_getc(fnc->ibuf);
@@ -642,7 +646,7 @@ static void fpm_read(struct event *t)
__func__, fpm.version, fpm.msg_type);
FPM_RECONNECT(fnc);
- return;
+ goto send_batch;
}
/*
@@ -654,7 +658,7 @@ static void fpm_read(struct event *t)
"%s: Received message length: %u that does not even fill the FPM header",
__func__, fpm.msg_len);
FPM_RECONNECT(fnc);
- return;
+ goto send_batch;
}
/*
@@ -665,7 +669,7 @@ static void fpm_read(struct event *t)
if (fpm.msg_len > available_bytes) {
stream_rewind_getp(fnc->ibuf, FPM_MSG_HDR_LEN);
stream_pulldown(fnc->ibuf);
- return;
+ goto send_batch;
}
available_bytes -= FPM_MSG_HDR_LEN;
@@ -715,8 +719,9 @@ static void fpm_read(struct event *t)
break;
}
- /* Parse the route data into a dplane ctx, then
- * enqueue it to zebra for processing.
+ /*
+	 * Parse the route data into a dplane ctx, add it to the ctx list,
+	 * and enqueue the whole batch to zebra for processing
*/
ctx = dplane_ctx_alloc();
dplane_ctx_route_init(ctx, DPLANE_OP_ROUTE_NOTIFY, NULL,
@@ -735,7 +740,8 @@ static void fpm_read(struct event *t)
* tableid to 0 in order for this to work.
*/
dplane_ctx_set_vrf(ctx, VRF_UNKNOWN);
- dplane_provider_enqueue_to_zebra(ctx);
+ /* Add to the list for batching */
+ dplane_ctx_enqueue_tail(&batch_list, ctx);
} else {
/*
* Let's continue to read other messages
@@ -755,6 +761,15 @@ static void fpm_read(struct event *t)
}
stream_reset(fnc->ibuf);
+
+send_batch:
+ /* Send all contexts to zebra in a single batch if we have any */
+ if (dplane_ctx_queue_count(&batch_list) > 0) {
+ if (IS_ZEBRA_DEBUG_FPM)
+ zlog_debug("%s: Sending batch of %u contexts to zebra", __func__,
+ dplane_ctx_queue_count(&batch_list));
+ dplane_provider_enqueue_ctx_list_to_zebra(&batch_list);
+ }
}
static void fpm_write(struct event *t)
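
fpm_read() now collects parsed contexts on a local list and hands the whole batch to zebra once per read pass, instead of enqueueing each context individually. The sketch below shows the same "accumulate locally, flush once" pattern in plain, FRR-independent C; the list and flush names are illustrative stand-ins for the dplane ctx list and dplane_provider_enqueue_ctx_list_to_zebra().

#include <stdio.h>
#include <stdlib.h>

struct item {
	int value;
	struct item *next;
};

struct batch {
	struct item *head, *tail;
	unsigned int count;
};

static void batch_add(struct batch *b, int value)
{
	struct item *it = calloc(1, sizeof(*it));

	it->value = value;
	if (b->tail)
		b->tail->next = it;
	else
		b->head = it;
	b->tail = it;
	b->count++;
}

/* Stand-in for the single hand-off call: consume the whole batch at once. */
static void batch_flush(struct batch *b)
{
	printf("flushing %u items\n", b->count);
	while (b->head) {
		struct item *it = b->head;

		b->head = it->next;
		free(it);
	}
	b->tail = NULL;
	b->count = 0;
}

int main(void)
{
	struct batch b = { 0 };

	for (int i = 0; i < 5; i++)
		batch_add(&b, i);	/* parse loop: queue locally */
	if (b.count)
		batch_flush(&b);	/* single hand-off at the end */
	return 0;
}
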
diff --git a/zebra/fpm_listener.c b/zebra/fpm_listener.c
index 7ae9601ef4..73e9dc2482 100644
--- a/zebra/fpm_listener.c
+++ b/zebra/fpm_listener.c
@@ -19,6 +19,7 @@
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
+#include <signal.h>
#ifdef GNU_LINUX
#include <stdint.h>
@@ -42,6 +43,35 @@
XREF_SETUP();
+PREDECL_RBTREE_UNIQ(fpm_route);
+
+/* Route structure to store in RB tree */
+struct fpm_route {
+ struct prefix prefix;
+ uint32_t table_id;
+ uint32_t nhg_id;
+ struct fpm_route_item rb_item;
+};
+
+/* Comparison function for routes */
+static int fpm_route_cmp(const struct fpm_route *a, const struct fpm_route *b)
+{
+ int ret;
+
+ /* First compare table IDs */
+ if (a->table_id < b->table_id)
+ return -1;
+ if (a->table_id > b->table_id)
+ return 1;
+
+ /* Then compare prefixes */
+ ret = prefix_cmp(&a->prefix, &b->prefix);
+ return ret;
+}
+
+/* RB tree for storing routes */
+DECLARE_RBTREE_UNIQ(fpm_route, struct fpm_route, rb_item, fpm_route_cmp);
+
struct glob {
int server_sock;
int sock;
@@ -49,6 +79,8 @@ struct glob {
bool reflect_fail_all;
bool dump_hex;
FILE *output_file;
+ const char *dump_file;
+ struct fpm_route_head route_tree;
};
struct glob glob_space;
@@ -758,6 +790,64 @@ static void fpm_listener_hexdump(const void *mem, size_t len)
}
/*
+ * handle_route_update
+ * Handles adding or removing a route from the route tree
+ */
+static void handle_route_update(struct netlink_msg_ctx *ctx, bool is_add)
+{
+ struct fpm_route *route;
+ struct fpm_route *existing;
+ struct fpm_route lookup = { 0 };
+
+ if (!ctx->dest || !ctx->rtmsg)
+ return;
+
+ /* Set up lookup key */
+ lookup.prefix.family = ctx->rtmsg->rtm_family;
+ lookup.prefix.prefixlen = ctx->rtmsg->rtm_dst_len;
+ memcpy(&lookup.prefix.u.prefix, RTA_DATA(ctx->dest),
+ (ctx->rtmsg->rtm_family == AF_INET) ? 4 : 16);
+ lookup.table_id = ctx->rtmsg->rtm_table;
+ lookup.nhg_id = ctx->nhgid ? *ctx->nhgid : 0;
+ /* Look up existing route */
+ existing = fpm_route_find(&glob->route_tree, &lookup);
+
+ if (is_add) {
+ if (existing) {
+ /* Route exists, update it */
+ existing->prefix = lookup.prefix;
+ existing->table_id = lookup.table_id;
+ existing->nhg_id = lookup.nhg_id;
+ } else {
+ /* Create new route structure */
+ route = calloc(1, sizeof(struct fpm_route));
+ if (!route) {
+ fprintf(stderr, "Failed to allocate route structure\n");
+ return;
+ }
+
+ /* Copy prefix information */
+ route->prefix = lookup.prefix;
+ route->table_id = lookup.table_id;
+ route->nhg_id = lookup.nhg_id;
+
+ /* Add route to tree */
+ if (fpm_route_add(&glob->route_tree, route)) {
+ fprintf(stderr, "Failed to add route to tree\n");
+ free(route);
+ }
+ }
+ } else {
+ /* Remove route from tree */
+ if (existing) {
+ existing = fpm_route_del(&glob->route_tree, existing);
+ if (existing)
+ free(existing);
+ }
+ }
+}
+
+/*
* parse_netlink_msg
*/
static void parse_netlink_msg(char *buf, size_t buf_len, fpm_msg_hdr_t *fpm)
@@ -789,6 +879,7 @@ static void parse_netlink_msg(char *buf, size_t buf_len, fpm_msg_hdr_t *fpm)
}
print_netlink_msg_ctx(ctx);
+ handle_route_update(ctx, hdr->nlmsg_type == RTM_NEWROUTE);
if (glob->reflect && hdr->nlmsg_type == RTM_NEWROUTE &&
ctx->rtmsg->rtm_protocol > RTPROT_STATIC) {
@@ -854,17 +945,62 @@ static void fpm_serve(void)
}
}
+/* Signal handler for SIGUSR1 */
+static void sigusr1_handler(int signum)
+{
+ struct fpm_route *route;
+ char buf[PREFIX_STRLEN];
+ FILE *out = glob->output_file;
+ FILE *dump_fp = NULL;
+
+ if (glob->dump_file) {
+ dump_fp = fopen(glob->dump_file, "w");
+ if (dump_fp) {
+ out = dump_fp;
+ setbuf(dump_fp, NULL);
+ } else
+ out = glob->output_file;
+ }
+
+ fprintf(out, "\n=== Route Tree Dump ===\n");
+ fprintf(out, "Timestamp: %s\n", get_timestamp());
+ fprintf(out, "Total routes: %zu\n", fpm_route_count(&glob->route_tree));
+ fprintf(out, "Routes:\n");
+
+ frr_each (fpm_route, &glob->route_tree, route) {
+ prefix2str(&route->prefix, buf, sizeof(buf));
+ fprintf(out, " Table %u, NHG %u: %s\n", route->table_id, route->nhg_id, buf);
+ }
+ fprintf(out, "=====================\n\n");
+ fflush(out);
+
+ if (dump_fp)
+ fclose(dump_fp);
+}
+
int main(int argc, char **argv)
{
pid_t daemon;
int r;
bool fork_daemon = false;
const char *output_file = NULL;
+ struct sigaction sa;
memset(glob, 0, sizeof(*glob));
glob->output_file = stdout;
+ fpm_route_init(&glob->route_tree);
+
+ /* Set up signal handler for SIGUSR1 */
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = sigusr1_handler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART;
+ if (sigaction(SIGUSR1, &sa, NULL) < 0) {
+ fprintf(stderr, "Failed to set up SIGUSR1 handler: %s\n", strerror(errno));
+ exit(1);
+ }
- while ((r = getopt(argc, argv, "rfdvo:")) != -1) {
+ while ((r = getopt(argc, argv, "rfdvo:z:")) != -1) {
switch (r) {
case 'r':
glob->reflect = true;
@@ -881,6 +1017,9 @@ int main(int argc, char **argv)
case 'o':
output_file = optarg;
break;
+ case 'z':
+ glob->dump_file = optarg;
+ break;
}
}
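
The listener now installs a SIGUSR1 handler with SA_RESTART so an operator can trigger a route-tree dump (optionally into the file given with the new -z option) without interrupting the read loop. Below is a minimal standalone sketch of that signal wiring; here the handler only sets a flag and the dump runs from the main loop, which is a common async-signal-safe variant of the same idea, not the listener's exact implementation.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t dump_requested;

static void sigusr1_handler(int signum)
{
	(void)signum;
	dump_requested = 1;	/* defer the real work to the main loop */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = sigusr1_handler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;	/* do not abort blocking reads */
	if (sigaction(SIGUSR1, &sa, NULL) < 0) {
		perror("sigaction");
		return 1;
	}

	printf("send SIGUSR1 to pid %ld to request a dump\n", (long)getpid());
	for (;;) {
		pause();	/* stands in for the listener's read loop */
		if (dump_requested) {
			dump_requested = 0;
			printf("=== dump requested ===\n");
		}
	}
}
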
diff --git a/zebra/rib.h b/zebra/rib.h
index 652f6208f4..fa6ce4447b 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -192,6 +192,12 @@ struct route_entry {
struct meta_queue {
struct list *subq[MQ_SIZE];
uint32_t size; /* sum of lengths of all subqueues */
+ _Atomic uint32_t max_subq[MQ_SIZE]; /* Max size of individual sub queue */
+ _Atomic uint32_t max_metaq; /* Max size of the MetaQ */
+ _Atomic uint32_t total_subq[MQ_SIZE]; /* Total subq events */
+ _Atomic uint32_t total_metaq; /* Total MetaQ events */
+ _Atomic uint32_t re_subq[MQ_SIZE]; /* current RE count sub queue */
+ _Atomic uint32_t max_re_subq[MQ_SIZE]; /* Max RE in sub queue */
};
/*
@@ -474,6 +480,7 @@ extern void zebra_rib_evaluate_rn_nexthops(struct route_node *rn, uint32_t seq,
bool rt_delete);
extern void rib_update_handle_vrf_all(enum rib_update_event event, int rtype);
+int zebra_show_metaq_counter(struct vty *vty, bool uj);
/*
* rib_find_rn_from_ctx
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 4344a8d79a..a6d43daa93 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -6590,6 +6590,14 @@ int dplane_provider_work_ready(void)
}
/*
+ * Enqueue a context list to zebra main.
+ */
+void dplane_provider_enqueue_ctx_list_to_zebra(struct dplane_ctx_list_head *batch_list)
+{
+ (zdplane_info.dg_results_cb)(batch_list);
+}
+
+/*
* Enqueue a context directly to zebra main.
*/
void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx)
diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h
index cabc70c232..1c03a29534 100644
--- a/zebra/zebra_dplane.h
+++ b/zebra/zebra_dplane.h
@@ -1236,6 +1236,9 @@ void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
/* Enqueue a context directly to zebra main. */
void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx);
+/* Enqueue a context list to zebra main. */
+void dplane_provider_enqueue_ctx_list_to_zebra(struct dplane_ctx_list_head *batch_list);
+
/* Enable collection of extra info about interfaces in route updates;
* this allows a provider/plugin to see some extra info in route update
* context objects.
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index e32b004ae9..c7dc5e5d07 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -30,6 +30,7 @@
#include "printfrr.h"
#include "frrscript.h"
#include "frrdistance.h"
+#include "lib/termtable.h"
#include "zebra/zebra_router.h"
#include "zebra/connected.h"
@@ -273,6 +274,63 @@ static const char *subqueue2str(enum meta_queue_indexes index)
return "Unknown";
}
+/* Handler for 'show zebra metaq' */
+int zebra_show_metaq_counter(struct vty *vty, bool uj)
+{
+ struct meta_queue *mq = zrouter.mq;
+ struct ttable *tt = NULL;
+ char *table = NULL;
+ json_object *json = NULL;
+ json_object *json_table = NULL;
+
+ if (!mq)
+ return CMD_WARNING;
+
+ /* Create a table for subqueue details */
+ tt = ttable_new(&ttable_styles[TTSTYLE_ASCII]);
+ ttable_add_row(tt, "SubQ|Current|Max Size|Total");
+
+ /* Add rows for each subqueue */
+ for (uint8_t i = 0; i < MQ_SIZE; i++) {
+ ttable_add_row(tt, "%s|%u|%u|%u", subqueue2str(i), mq->subq[i]->count,
+ mq->max_subq[i], mq->total_subq[i]);
+ }
+
+	/* For better spacing between the cell content and the separator */
+ tt->style.cell.rpad = 2;
+ tt->style.cell.lpad = 1;
+ ttable_restyle(tt);
+
+ if (uj) {
+ json = json_object_new_object();
+ /* Add MetaQ summary to the JSON object */
+ json_object_int_add(json, "currentSize", mq->size);
+ json_object_int_add(json, "maxSize", mq->max_metaq);
+ json_object_int_add(json, "total", mq->total_metaq);
+
+ /* Convert the table to JSON and add it to the main JSON object */
+	/* s = string column, d = integer column */
+ json_table = ttable_json(tt, "sddd");
+ json_object_object_add(json, "subqueues", json_table);
+ vty_json(vty, json);
+ } else {
+ vty_out(vty, "MetaQ Summary\n");
+ vty_out(vty, "Current Size\t: %u\n", mq->size);
+ vty_out(vty, "Max Size\t: %u\n", mq->max_metaq);
+ vty_out(vty, "Total\t\t: %u\n", mq->total_metaq);
+
+ /* Dump the table */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP_TTABLE, table);
+ }
+
+ /* Clean up the table */
+ ttable_del(tt);
+
+ return CMD_SUCCESS;
+}
+
printfrr_ext_autoreg_p("ZN", printfrr_zebra_node);
static ssize_t printfrr_zebra_node(struct fbuf *buf, struct printfrr_eargs *ea,
const void *ptr)
@@ -3257,6 +3315,7 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
struct route_node *rn = NULL;
struct route_entry *re = NULL, *curr_re = NULL;
uint8_t qindex = MQ_SIZE, curr_qindex = MQ_SIZE;
+ uint64_t curr, high;
rn = (struct route_node *)data;
@@ -3300,6 +3359,15 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
listnode_add(mq->subq[qindex], rn);
route_lock_node(rn);
mq->size++;
+ atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+ atomic_fetch_add_explicit(&mq->total_subq[qindex], 1, memory_order_relaxed);
+ curr = listcount(mq->subq[qindex]);
+ high = atomic_load_explicit(&mq->max_subq[qindex], memory_order_relaxed);
+ if (curr > high)
+ atomic_store_explicit(&mq->max_subq[qindex], curr, memory_order_relaxed);
+ high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+ if (mq->size > high)
+ atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
rnode_debug(rn, re->vrf_id, "queued rn %p into sub-queue %s mq size %u", (void *)rn,
@@ -3310,8 +3378,21 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
static int early_label_meta_queue_add(struct meta_queue *mq, void *data)
{
+ uint64_t curr, high;
+
listnode_add(mq->subq[META_QUEUE_EARLY_LABEL], data);
mq->size++;
+ atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+ atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_EARLY_LABEL], 1, memory_order_relaxed);
+ curr = listcount(mq->subq[META_QUEUE_EARLY_LABEL]);
+ high = atomic_load_explicit(&mq->max_subq[META_QUEUE_EARLY_LABEL], memory_order_relaxed);
+ if (curr > high)
+ atomic_store_explicit(&mq->max_subq[META_QUEUE_EARLY_LABEL], curr,
+ memory_order_relaxed);
+ high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+ if (mq->size > high)
+ atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
+
return 0;
}
@@ -3320,6 +3401,7 @@ static int rib_meta_queue_nhg_ctx_add(struct meta_queue *mq, void *data)
struct nhg_ctx *ctx = NULL;
uint8_t qindex = META_QUEUE_NHG;
struct wq_nhg_wrapper *w;
+ uint64_t curr, high;
ctx = (struct nhg_ctx *)data;
@@ -3333,6 +3415,15 @@ static int rib_meta_queue_nhg_ctx_add(struct meta_queue *mq, void *data)
listnode_add(mq->subq[qindex], w);
mq->size++;
+ atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+ atomic_fetch_add_explicit(&mq->total_subq[qindex], 1, memory_order_relaxed);
+ curr = listcount(mq->subq[qindex]);
+ high = atomic_load_explicit(&mq->max_subq[qindex], memory_order_relaxed);
+ if (curr > high)
+ atomic_store_explicit(&mq->max_subq[qindex], curr, memory_order_relaxed);
+ high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+ if (mq->size > high)
+ atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug("NHG Context id=%u queued into sub-queue %s mq size %u", ctx->id,
@@ -3347,6 +3438,7 @@ static int rib_meta_queue_nhg_process(struct meta_queue *mq, void *data,
struct nhg_hash_entry *nhe = NULL;
uint8_t qindex = META_QUEUE_NHG;
struct wq_nhg_wrapper *w;
+ uint64_t curr, high;
nhe = (struct nhg_hash_entry *)data;
@@ -3361,6 +3453,15 @@ static int rib_meta_queue_nhg_process(struct meta_queue *mq, void *data,
listnode_add(mq->subq[qindex], w);
mq->size++;
+ atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+ atomic_fetch_add_explicit(&mq->total_subq[qindex], 1, memory_order_relaxed);
+ curr = listcount(mq->subq[qindex]);
+ high = atomic_load_explicit(&mq->max_subq[qindex], memory_order_relaxed);
+ if (curr > high)
+ atomic_store_explicit(&mq->max_subq[qindex], curr, memory_order_relaxed);
+ high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+ if (mq->size > high)
+ atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug("NHG id=%u queued into sub-queue %s mq size %u", nhe->id,
@@ -3381,8 +3482,19 @@ static int rib_meta_queue_nhg_del(struct meta_queue *mq, void *data)
static int rib_meta_queue_evpn_add(struct meta_queue *mq, void *data)
{
+ uint64_t curr, high;
+
listnode_add(mq->subq[META_QUEUE_EVPN], data);
mq->size++;
+ atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+ atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_EVPN], 1, memory_order_relaxed);
+ curr = listcount(mq->subq[META_QUEUE_EVPN]);
+ high = atomic_load_explicit(&mq->max_subq[META_QUEUE_EVPN], memory_order_relaxed);
+ if (curr > high)
+ atomic_store_explicit(&mq->max_subq[META_QUEUE_EVPN], curr, memory_order_relaxed);
+ high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+ if (mq->size > high)
+ atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
return 0;
}
@@ -4227,8 +4339,19 @@ void _route_entry_dump(const char *func, union prefixconstptr pp,
static int rib_meta_queue_gr_run_add(struct meta_queue *mq, void *data)
{
+ uint64_t curr, high;
+
listnode_add(mq->subq[META_QUEUE_GR_RUN], data);
mq->size++;
+ atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+ atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_GR_RUN], 1, memory_order_relaxed);
+ curr = listcount(mq->subq[META_QUEUE_GR_RUN]);
+ high = atomic_load_explicit(&mq->max_subq[META_QUEUE_GR_RUN], memory_order_relaxed);
+ if (curr > high)
+ atomic_store_explicit(&mq->max_subq[META_QUEUE_GR_RUN], curr, memory_order_relaxed);
+ high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+ if (mq->size > high)
+ atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug("Graceful Run adding mq size %u", zrouter.mq->size);
@@ -4239,9 +4362,20 @@ static int rib_meta_queue_gr_run_add(struct meta_queue *mq, void *data)
static int rib_meta_queue_early_route_add(struct meta_queue *mq, void *data)
{
struct zebra_early_route *ere = data;
+ uint64_t curr, high;
listnode_add(mq->subq[META_QUEUE_EARLY_ROUTE], data);
mq->size++;
+ atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+ atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_EARLY_ROUTE], 1, memory_order_relaxed);
+ curr = listcount(mq->subq[META_QUEUE_EARLY_ROUTE]);
+ high = atomic_load_explicit(&mq->max_subq[META_QUEUE_EARLY_ROUTE], memory_order_relaxed);
+ if (curr > high)
+ atomic_store_explicit(&mq->max_subq[META_QUEUE_EARLY_ROUTE], curr,
+ memory_order_relaxed);
+ high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+ if (mq->size > high)
+ atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
struct vrf *vrf = vrf_lookup_by_id(ere->re->vrf_id);
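
Each meta-queue enqueue path above bumps relaxed atomic totals and tracks high-water marks for the subqueue and for the MetaQ as a whole. The standalone C11 sketch below shows the same counter pattern; the compare-exchange loop is one way to keep the maximum monotonic even with concurrent writers, whereas a plain load/compare/store (as in the hunks above) is sufficient when enqueues happen from a single thread.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t total;
static _Atomic uint32_t max_seen;

/* Count one event and keep max_seen at the largest 'current' observed. */
static void record(uint32_t current)
{
	uint32_t high;

	atomic_fetch_add_explicit(&total, 1, memory_order_relaxed);

	high = atomic_load_explicit(&max_seen, memory_order_relaxed);
	while (current > high &&
	       !atomic_compare_exchange_weak_explicit(&max_seen, &high, current,
						      memory_order_relaxed,
						      memory_order_relaxed))
		; /* 'high' is refreshed by the failed CAS; retry */
}

int main(void)
{
	uint32_t depth = 0;

	for (int i = 0; i < 10; i++)
		record(++depth);
	for (int i = 0; i < 4; i++)
		record(--depth);

	printf("total=%u max=%u\n", atomic_load(&total), atomic_load(&max_seen));
	return 0;
}
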
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index 15bc2c20d2..9e4db11989 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -4047,6 +4047,20 @@ DEFUN (zebra_show_routing_tables_summary,
return CMD_SUCCESS;
}
+/* Display Zebra MetaQ counters */
+DEFUN (show_zebra_metaq_counters,
+ show_zebra_metaq_counters_cmd,
+ "show zebra metaq [json]",
+ SHOW_STR
+ ZEBRA_STR
+ "Zebra MetaQ counters\n"
+ JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+
+ return zebra_show_metaq_counter(vty, uj);
+}
+
/* IPForwarding configuration write function. */
static int config_write_forwarding(struct vty *vty)
{
@@ -4336,6 +4350,7 @@ void zebra_vty_init(void)
install_element(VIEW_NODE, &show_dataplane_providers_cmd);
install_element(CONFIG_NODE, &zebra_dplane_queue_limit_cmd);
install_element(CONFIG_NODE, &no_zebra_dplane_queue_limit_cmd);
+ install_element(VIEW_NODE, &show_zebra_metaq_counters_cmd);
#ifdef HAVE_NETLINK
install_element(CONFIG_NODE, &zebra_kernel_netlink_batch_tx_buf_cmd);