summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--babeld/kernel.c4
-rw-r--r--babeld/kernel.h2
-rw-r--r--bgpd/bgp_advertise.c2
-rw-r--r--bgpd/bgp_damp.c10
-rw-r--r--bgpd/bgp_dump.c2
-rw-r--r--bgpd/bgp_evpn.c29
-rw-r--r--bgpd/bgp_evpn_mh.c4
-rw-r--r--bgpd/bgp_evpn_vty.c76
-rw-r--r--bgpd/bgp_fsm.c10
-rw-r--r--bgpd/bgp_io.c2
-rw-r--r--bgpd/bgp_labelpool.c24
-rw-r--r--bgpd/bgp_mplsvpn.c2
-rw-r--r--bgpd/bgp_nexthop.c7
-rw-r--r--bgpd/bgp_nexthop.h1
-rw-r--r--bgpd/bgp_nht.c52
-rw-r--r--bgpd/bgp_packet.c10
-rw-r--r--bgpd/bgp_route.c19
-rw-r--r--bgpd/bgp_snmp.c4
-rw-r--r--bgpd/bgp_updgrp.c4
-rw-r--r--bgpd/bgp_vty.c16
-rw-r--r--bgpd/bgpd.c38
-rw-r--r--bgpd/bgpd.h3
-rw-r--r--bgpd/rfapi/bgp_rfapi_cfg.c2
-rw-r--r--bgpd/rfapi/rfapi.c11
-rw-r--r--bgpd/rfapi/rfapi_import.c2
-rw-r--r--bgpd/rfapi/rfapi_private.h5
-rw-r--r--bgpd/rfapi/rfapi_rib.c22
-rw-r--r--bgpd/rfapi/rfapi_vty.c2
-rw-r--r--debian/control1
-rw-r--r--doc/user/bgp.rst3
-rw-r--r--doc/user/pimv6.rst3
-rw-r--r--gdb/lib.txt22
-rw-r--r--lib/sigevent.c3
-rw-r--r--nhrpd/nhrp_interface.c3
-rw-r--r--ospf6d/ospf6_abr.c7
-rw-r--r--ospfd/ospf_vty.c27
-rw-r--r--pimd/mtracebis_netlink.c2
-rw-r--r--pimd/pim6_mld.c135
-rw-r--r--pimd/pim_addr.h2
-rw-r--r--pimd/pim_cmd.c4
-rw-r--r--pimd/pim_cmd_common.c223
-rw-r--r--pimd/pim_iface.c8
-rw-r--r--pimd/pim_igmp.c11
-rw-r--r--pimd/pim_instance.h2
-rw-r--r--pimd/pim_nb_config.c21
-rw-r--r--pimd/pim_rp.c61
-rw-r--r--pimd/pim_vty.c6
-rw-r--r--ripd/ripd.c3
-rw-r--r--tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py396
-rwxr-xr-xtests/topotests/conftest.py2
-rw-r--r--tests/topotests/lib/topogen.py2
-rwxr-xr-xtools/frrcommon.sh.in12
-rw-r--r--zebra/kernel_socket.c8
-rw-r--r--zebra/redistribute.c13
-rw-r--r--zebra/rib.h22
-rw-r--r--zebra/rt_netlink.c65
-rw-r--r--zebra/tc_netlink.c2
-rw-r--r--zebra/zapi_msg.c34
-rw-r--r--zebra/zebra_dplane.c5
-rw-r--r--zebra/zebra_mpls.c10
-rw-r--r--zebra/zebra_mpls.h17
-rw-r--r--zebra/zebra_rib.c1083
-rw-r--r--zebra/zebra_script.c19
-rw-r--r--zebra/zserv.c12
-rw-r--r--zebra/zserv.h10
65 files changed, 1769 insertions, 855 deletions
diff --git a/babeld/kernel.c b/babeld/kernel.c
index 3941db8d5f..5aa01ceb44 100644
--- a/babeld/kernel.c
+++ b/babeld/kernel.c
@@ -227,10 +227,10 @@ if_eui64(int ifindex, unsigned char *eui)
/* Like gettimeofday, but returns monotonic time. If POSIX clocks are not
available, falls back to gettimeofday but enforces monotonicity. */
-int
+void
gettime(struct timeval *tv)
{
- return monotime(tv);
+ monotime(tv);
}
/* If /dev/urandom doesn't exist, this will fail with ENOENT, which the
diff --git a/babeld/kernel.h b/babeld/kernel.h
index 5b1437ef3e..f39bc35bdb 100644
--- a/babeld/kernel.h
+++ b/babeld/kernel.h
@@ -43,7 +43,7 @@ int kernel_route(enum babel_kernel_routes operation, const unsigned char *dest,
unsigned int metric, const unsigned char *newgate,
int newifindex, unsigned int newmetric);
int if_eui64(int ifindex, unsigned char *eui);
-int gettime(struct timeval *tv);
+void gettime(struct timeval *tv);
int read_random_bytes(void *buf, size_t len);
#endif /* BABEL_KERNEL_H */
diff --git a/bgpd/bgp_advertise.c b/bgpd/bgp_advertise.c
index cfbb29df1c..f62a54b03c 100644
--- a/bgpd/bgp_advertise.c
+++ b/bgpd/bgp_advertise.c
@@ -197,7 +197,7 @@ void bgp_adj_in_set(struct bgp_dest *dest, struct peer *peer, struct attr *attr,
adj = XCALLOC(MTYPE_BGP_ADJ_IN, sizeof(struct bgp_adj_in));
adj->peer = peer_lock(peer); /* adj_in peer reference */
adj->attr = bgp_attr_intern(attr);
- adj->uptime = bgp_clock();
+ adj->uptime = monotime(NULL);
adj->addpath_rx_id = addpath_id;
BGP_ADJ_IN_ADD(dest, adj);
bgp_dest_lock_node(dest);
diff --git a/bgpd/bgp_damp.c b/bgpd/bgp_damp.c
index 9acbaf7733..664619078a 100644
--- a/bgpd/bgp_damp.c
+++ b/bgpd/bgp_damp.c
@@ -125,7 +125,7 @@ static void bgp_reuse_timer(struct thread *t)
thread_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
&bdc->t_reuse);
- t_now = bgp_clock();
+ t_now = monotime(NULL);
/* 1. save a pointer to the current zeroth queue head and zero the
list head entry. */
@@ -189,7 +189,7 @@ int bgp_damp_withdraw(struct bgp_path_info *path, struct bgp_dest *dest,
unsigned int last_penalty = 0;
struct bgp_damp_config *bdc = &damp[afi][safi];
- t_now = bgp_clock();
+ t_now = monotime(NULL);
/* Processing Unreachable Messages. */
if (path->extra)
@@ -273,7 +273,7 @@ int bgp_damp_update(struct bgp_path_info *path, struct bgp_dest *dest,
if (!path->extra || !((bdi = path->extra->damp_info)))
return BGP_DAMP_USED;
- t_now = bgp_clock();
+ t_now = monotime(NULL);
bgp_path_info_unset_flag(dest, path, BGP_PATH_HISTORY);
bdi->lastrecord = BGP_RECORD_UPDATE;
@@ -588,7 +588,7 @@ void bgp_damp_info_vty(struct vty *vty, struct bgp_path_info *path, afi_t afi,
return;
/* Calculate new penalty. */
- t_now = bgp_clock();
+ t_now = monotime(NULL);
t_diff = t_now - bdi->t_updated;
penalty = bgp_damp_decay(t_diff, bdi->penalty, bdc);
@@ -642,7 +642,7 @@ const char *bgp_damp_reuse_time_vty(struct vty *vty, struct bgp_path_info *path,
return NULL;
/* Calculate new penalty. */
- t_now = bgp_clock();
+ t_now = monotime(NULL);
t_diff = t_now - bdi->t_updated;
penalty = bgp_damp_decay(t_diff, bdi->penalty, bdc);
diff --git a/bgpd/bgp_dump.c b/bgpd/bgp_dump.c
index 720925b20f..9f64341640 100644
--- a/bgpd/bgp_dump.c
+++ b/bgpd/bgp_dump.c
@@ -367,7 +367,7 @@ bgp_dump_route_node_record(int afi, struct bgp_dest *dest,
stream_putw(obuf, path->peer->table_dump_index);
/* Originated */
- stream_putl(obuf, time(NULL) - (bgp_clock() - path->uptime));
+ stream_putl(obuf, time(NULL) - (monotime(NULL) - path->uptime));
/*Path Identifier*/
if (addpath_capable) {
diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c
index 0642c966eb..ce05005eab 100644
--- a/bgpd/bgp_evpn.c
+++ b/bgpd/bgp_evpn.c
@@ -1263,7 +1263,7 @@ static int update_evpn_type5_route_entry(struct bgp *bgp_evpn,
/* Unintern existing, set to new. */
bgp_attr_unintern(&tmp_pi->attr);
tmp_pi->attr = attr_new;
- tmp_pi->uptime = bgp_clock();
+ tmp_pi->uptime = monotime(NULL);
}
}
return 0;
@@ -1626,7 +1626,7 @@ static int update_evpn_route_entry(struct bgp *bgp, struct bgpevpn *vpn,
/* Unintern existing, set to new. */
bgp_attr_unintern(&tmp_pi->attr);
tmp_pi->attr = attr_new;
- tmp_pi->uptime = bgp_clock();
+ tmp_pi->uptime = monotime(NULL);
}
}
@@ -2520,7 +2520,7 @@ static int install_evpn_route_entry_in_vrf(struct bgp *bgp_vrf,
/* Unintern existing, set to new. */
bgp_attr_unintern(&pi->attr);
pi->attr = attr_new;
- pi->uptime = bgp_clock();
+ pi->uptime = monotime(NULL);
}
/* Gateway IP nexthop should be resolved */
@@ -2643,7 +2643,7 @@ static int install_evpn_route_entry(struct bgp *bgp, struct bgpevpn *vpn,
/* Unintern existing, set to new. */
bgp_attr_unintern(&pi->attr);
pi->attr = attr_new;
- pi->uptime = bgp_clock();
+ pi->uptime = monotime(NULL);
}
/* Add this route to remote IP hashtable */
@@ -6239,9 +6239,6 @@ static void bgp_evpn_remote_ip_hash_iterate(struct bgpevpn *vpn,
static void show_remote_ip_entry(struct hash_bucket *bucket, void *args)
{
char buf[INET6_ADDRSTRLEN];
- char buf2[EVPN_ROUTE_STRLEN];
- struct prefix_evpn *evp;
-
struct listnode *node = NULL;
struct bgp_path_info *pi = NULL;
struct vty *vty = (struct vty *)args;
@@ -6250,11 +6247,8 @@ static void show_remote_ip_entry(struct hash_bucket *bucket, void *args)
vty_out(vty, " Remote IP: %s\n",
ipaddr2str(&ip->addr, buf, sizeof(buf)));
vty_out(vty, " Linked MAC/IP routes:\n");
- for (ALL_LIST_ELEMENTS_RO(ip->macip_path_list, node, pi)) {
- evp = (struct prefix_evpn *)&pi->net->p;
- prefix2str(evp, buf2, sizeof(buf2));
- vty_out(vty, " %s\n", buf2);
- }
+ for (ALL_LIST_ELEMENTS_RO(ip->macip_path_list, node, pi))
+ vty_out(vty, " %pFX\n", &pi->net->p);
}
void bgp_evpn_show_remote_ip_hash(struct hash_bucket *bucket, void *args)
@@ -6438,14 +6432,11 @@ static void bgp_evpn_remote_ip_process_nexthops(struct bgpevpn *vpn,
if (!bnc->nexthop || bnc->nexthop->ifindex != vpn->svi_ifindex)
return;
- if (BGP_DEBUG(nht, NHT)) {
- char buf[PREFIX2STR_BUFFER];
-
- prefix2str(&bnc->prefix, buf, sizeof(buf));
- zlog_debug("%s(%u): vni %u mac/ip %s for NH %s",
+ if (BGP_DEBUG(nht, NHT))
+ zlog_debug("%s(%u): vni %u mac/ip %s for NH %pFX",
vpn->bgp_vrf->name_pretty, vpn->tenant_vrf_id,
- vpn->vni, (resolve ? "add" : "delete"), buf);
- }
+ vpn->vni, (resolve ? "add" : "delete"),
+ &bnc->prefix);
/*
* MAC/IP route or SVI or tenant vrf being added to EVI.
diff --git a/bgpd/bgp_evpn_mh.c b/bgpd/bgp_evpn_mh.c
index 3f801f7ea0..95a0c31b55 100644
--- a/bgpd/bgp_evpn_mh.c
+++ b/bgpd/bgp_evpn_mh.c
@@ -233,7 +233,7 @@ static int bgp_evpn_es_route_install(struct bgp *bgp,
/* Unintern existing, set to new. */
bgp_attr_unintern(&pi->attr);
pi->attr = attr_new;
- pi->uptime = bgp_clock();
+ pi->uptime = monotime(NULL);
}
/* Perform route selection and update zebra, if required. */
@@ -432,7 +432,7 @@ int bgp_evpn_mh_route_update(struct bgp *bgp, struct bgp_evpn_es *es,
/* Unintern existing, set to new. */
bgp_attr_unintern(&tmp_pi->attr);
tmp_pi->attr = attr_new;
- tmp_pi->uptime = bgp_clock();
+ tmp_pi->uptime = monotime(NULL);
}
}
diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c
index 6ba516c39c..4277162339 100644
--- a/bgpd/bgp_evpn_vty.c
+++ b/bgpd/bgp_evpn_vty.c
@@ -614,14 +614,10 @@ static void show_esi_routes(struct bgp *bgp,
for (dest = bgp_table_top(es->route_table); dest;
dest = bgp_route_next(dest)) {
int add_prefix_to_json = 0;
- char prefix_str[BUFSIZ];
json_object *json_paths = NULL;
json_object *json_prefix = NULL;
const struct prefix *p = bgp_dest_get_prefix(dest);
- prefix2str((struct prefix_evpn *)p, prefix_str,
- sizeof(prefix_str));
-
if (json)
json_prefix = json_object_new_object();
@@ -661,14 +657,14 @@ static void show_esi_routes(struct bgp *bgp,
if (json) {
if (add_prefix_to_json) {
- json_object_string_add(json_prefix, "prefix",
- prefix_str);
+ json_object_string_addf(json_prefix, "prefix",
+ "%pFX", p);
json_object_int_add(json_prefix, "prefixLen",
p->prefixlen);
json_object_object_add(json_prefix, "paths",
json_paths);
- json_object_object_add(json, prefix_str,
- json_prefix);
+ json_object_object_addf(json, json_prefix,
+ "%pFX", p);
} else {
json_object_free(json_paths);
json_object_free(json_prefix);
@@ -800,14 +796,10 @@ static void show_vni_routes(struct bgp *bgp, struct bgpevpn *vpn, int type,
const struct prefix_evpn *evp =
(const struct prefix_evpn *)bgp_dest_get_prefix(dest);
int add_prefix_to_json = 0;
- char prefix_str[BUFSIZ];
json_object *json_paths = NULL;
json_object *json_prefix = NULL;
const struct prefix *p = bgp_dest_get_prefix(dest);
- prefix2str((struct prefix_evpn *)bgp_dest_get_prefix(dest),
- prefix_str, sizeof(prefix_str));
-
if (type && evp->prefix.route_type != type)
continue;
@@ -861,14 +853,14 @@ static void show_vni_routes(struct bgp *bgp, struct bgpevpn *vpn, int type,
if (json) {
if (add_prefix_to_json) {
- json_object_string_add(json_prefix, "prefix",
- prefix_str);
+ json_object_string_addf(json_prefix, "prefix",
+ "%pFX", p);
json_object_int_add(json_prefix, "prefixLen",
p->prefixlen);
json_object_object_add(json_prefix, "paths",
json_paths);
- json_object_object_add(json, prefix_str,
- json_prefix);
+ json_object_object_addf(json, json_prefix,
+ "%pFX", p);
} else {
json_object_free(json_paths);
json_object_free(json_prefix);
@@ -1190,7 +1182,6 @@ static int bgp_show_ethernet_vpn(struct vty *vty, struct prefix_rd *prd,
int rd_header;
int header = 1;
char rd_str[RD_ADDRSTRLEN];
- char buf[BUFSIZ];
int no_display;
unsigned long output_count = 0;
@@ -1353,20 +1344,17 @@ static int bgp_show_ethernet_vpn(struct vty *vty, struct prefix_rd *prd,
json_prefix_info = json_object_new_object();
- prefix2str((struct prefix_evpn *)p, buf,
- BUFSIZ);
-
- json_object_string_addf(
- json_prefix_info, "prefix", "%pFX",
- (struct prefix_evpn *)p);
+ json_object_string_addf(json_prefix_info,
+ "prefix", "%pFX", p);
json_object_int_add(json_prefix_info,
"prefixLen", p->prefixlen);
json_object_object_add(json_prefix_info,
"paths", json_array);
- json_object_object_add(json_nroute, buf,
- json_prefix_info);
+ json_object_object_addf(json_nroute,
+ json_prefix_info,
+ "%pFX", p);
json_array = NULL;
}
}
@@ -2574,7 +2562,6 @@ static void evpn_show_route_rd_macip(struct vty *vty, struct bgp *bgp,
safi_t safi;
uint32_t path_cnt = 0;
json_object *json_paths = NULL;
- char prefix_str[BUFSIZ];
afi = AFI_L2VPN;
safi = SAFI_EVPN;
@@ -2593,8 +2580,6 @@ static void evpn_show_route_rd_macip(struct vty *vty, struct bgp *bgp,
return;
}
- prefix2str(&p, prefix_str, sizeof(prefix_str));
-
/* Prefix and num paths displayed once per prefix. */
route_vty_out_detail_header(vty, bgp, dest, prd, afi, safi, json);
@@ -2619,7 +2604,7 @@ static void evpn_show_route_rd_macip(struct vty *vty, struct bgp *bgp,
if (json && path_cnt) {
if (path_cnt)
- json_object_object_add(json, prefix_str, json_paths);
+ json_object_object_addf(json, json_paths, "%pFX", &p);
json_object_int_add(json, "numPaths", path_cnt);
} else {
vty_out(vty, "\nDisplayed %u paths for requested prefix\n",
@@ -2678,12 +2663,8 @@ static void evpn_show_route_rd(struct vty *vty, struct bgp *bgp,
(const struct prefix_evpn *)bgp_dest_get_prefix(dest);
json_object *json_prefix = NULL;
json_object *json_paths = NULL;
- char prefix_str[BUFSIZ];
int add_prefix_to_json = 0;
- prefix2str((struct prefix_evpn *)evp, prefix_str,
- sizeof(prefix_str));
-
if (type && evp->prefix.route_type != type)
continue;
@@ -2739,8 +2720,8 @@ static void evpn_show_route_rd(struct vty *vty, struct bgp *bgp,
if (add_prefix_to_json) {
json_object_object_add(json_prefix, "paths",
json_paths);
- json_object_object_add(json_rd, prefix_str,
- json_prefix);
+ json_object_object_addf(json_rd, json_prefix,
+ "%pFX", evp);
} else {
json_object_free(json_paths);
json_object_free(json_prefix);
@@ -2798,7 +2779,6 @@ static void evpn_show_route_rd_all_macip(struct vty *vty, struct bgp *bgp,
json_object *json_prefix = NULL; /* prefix within an RD */
json_object *json_rd = NULL; /* holds all prefixes for RD */
char rd_str[RD_ADDRSTRLEN];
- char prefix_str[BUFSIZ];
int add_rd_to_json = 0;
struct prefix_evpn ep;
const struct prefix *rd_destp = bgp_dest_get_prefix(rd_dest);
@@ -2825,8 +2805,6 @@ static void evpn_show_route_rd_all_macip(struct vty *vty, struct bgp *bgp,
const struct prefix *p = bgp_dest_get_prefix(dest);
- prefix2str(p, prefix_str, sizeof(prefix_str));
-
pi = bgp_dest_get_bgp_path_info(dest);
if (pi) {
/* RD header - per RD. */
@@ -2838,8 +2816,8 @@ static void evpn_show_route_rd_all_macip(struct vty *vty, struct bgp *bgp,
if (json) {
json_prefix = json_object_new_object();
json_paths = json_object_new_array();
- json_object_string_add(json_prefix, "prefix",
- prefix_str);
+ json_object_string_addf(json_prefix, "prefix", "%pFX",
+ p);
json_object_int_add(json_prefix, "prefixLen",
p->prefixlen);
} else
@@ -2873,8 +2851,8 @@ static void evpn_show_route_rd_all_macip(struct vty *vty, struct bgp *bgp,
if (json) {
json_object_object_add(json_prefix, "paths",
json_paths);
- json_object_object_add(json_rd, prefix_str,
- json_prefix);
+ json_object_object_addf(json_rd, json_prefix, "%pFX",
+ p);
if (add_rd_to_json)
json_object_object_add(json, rd_str, json_rd);
else {
@@ -2954,13 +2932,9 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type,
const struct prefix_evpn *evp =
(const struct prefix_evpn *)bgp_dest_get_prefix(
dest);
- char prefix_str[BUFSIZ];
int add_prefix_to_json = 0;
const struct prefix *p = bgp_dest_get_prefix(dest);
- prefix2str((struct prefix_evpn *)p, prefix_str,
- sizeof(prefix_str));
-
if (type && evp->prefix.route_type != type)
continue;
@@ -2992,8 +2966,8 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type,
if (json) {
json_prefix = json_object_new_object();
json_paths = json_object_new_array();
- json_object_string_add(json_prefix, "prefix",
- prefix_str);
+ json_object_string_addf(json_prefix, "prefix",
+ "%pFX", p);
json_object_int_add(json_prefix, "prefixLen",
p->prefixlen);
}
@@ -3038,9 +3012,9 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type,
json_object_object_add(json_prefix,
"paths",
json_paths);
- json_object_object_add(json_rd,
- prefix_str,
- json_prefix);
+ json_object_object_addf(json_rd,
+ json_prefix,
+ "%pFX", p);
} else {
json_object_free(json_prefix);
json_object_free(json_paths);
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index b570c84d8b..7b96555913 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -574,7 +574,7 @@ void bgp_routeadv_timer(struct thread *thread)
zlog_debug("%s [FSM] Timer (routeadv timer expire)",
peer->host);
- peer->synctime = bgp_clock();
+ peer->synctime = monotime(NULL);
thread_add_timer_msec(bm->master, bgp_generate_updgrp_packets, peer, 0,
&peer->t_generate_updgrp_packets);
@@ -975,7 +975,7 @@ void bgp_start_routeadv(struct bgp *bgp)
*/
void bgp_adjust_routeadv(struct peer *peer)
{
- time_t nowtime = bgp_clock();
+ time_t nowtime = monotime(NULL);
double diff;
unsigned long remain;
@@ -987,7 +987,7 @@ void bgp_adjust_routeadv(struct peer *peer)
*/
THREAD_OFF(peer->t_routeadv);
- peer->synctime = bgp_clock();
+ peer->synctime = monotime(NULL);
/* If suppress fib pending is enabled, route is advertised to
* peers when the status is received from the FIB. The delay
* is added to update group packet generate which will allow
@@ -1471,7 +1471,7 @@ int bgp_stop(struct peer *peer)
}
/* set last reset time */
- peer->resettime = peer->uptime = bgp_clock();
+ peer->resettime = peer->uptime = monotime(NULL);
if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
zlog_debug("%s remove from all update group",
@@ -2220,7 +2220,7 @@ static int bgp_establish(struct peer *peer)
if (!peer->v_holdtime)
bgp_keepalives_on(peer);
- peer->uptime = bgp_clock();
+ peer->uptime = monotime(NULL);
/* Send route-refresh when ORF is enabled.
* Stop Long-lived Graceful Restart timers.
diff --git a/bgpd/bgp_io.c b/bgpd/bgp_io.c
index aba28fa504..7af1fae280 100644
--- a/bgpd/bgp_io.c
+++ b/bgpd/bgp_io.c
@@ -431,7 +431,7 @@ static uint16_t bgp_write(struct peer *peer)
}
done : {
- now = bgp_clock();
+ now = monotime(NULL);
/*
* Update last_update if UPDATEs were written.
* Note: that these are only updated at end,
diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c
index 8772afd736..fa1dcf33e0 100644
--- a/bgpd/bgp_labelpool.c
+++ b/bgpd/bgp_labelpool.c
@@ -719,16 +719,14 @@ DEFUN(show_bgp_labelpool_ledger, show_bgp_labelpool_ledger_cmd,
vty_out(vty, "%-18s %u\n",
"INVALID", lcb->label);
else {
- char buf[PREFIX2STR_BUFFER];
p = bgp_dest_get_prefix(dest);
- prefix2str(p, buf, sizeof(buf));
if (uj) {
- json_object_string_add(json_elem,
- "prefix", buf);
+ json_object_string_addf(
+ json_elem, "prefix", "%pFX", p);
json_object_int_add(json_elem, "label",
lcb->label);
} else
- vty_out(vty, "%-18s %u\n", buf,
+ vty_out(vty, "%-18pFX %u\n", p,
lcb->label);
}
break;
@@ -812,16 +810,14 @@ DEFUN(show_bgp_labelpool_inuse, show_bgp_labelpool_inuse_cmd,
vty_out(vty, "INVALID %u\n",
label);
else {
- char buf[PREFIX2STR_BUFFER];
p = bgp_dest_get_prefix(dest);
- prefix2str(p, buf, sizeof(buf));
if (uj) {
- json_object_string_add(json_elem,
- "prefix", buf);
+ json_object_string_addf(
+ json_elem, "prefix", "%pFX", p);
json_object_int_add(json_elem, "label",
label);
} else
- vty_out(vty, "%-18s %u\n", buf,
+ vty_out(vty, "%-18pFX %u\n", p,
label);
}
break;
@@ -851,7 +847,6 @@ DEFUN(show_bgp_labelpool_requests, show_bgp_labelpool_requests_cmd,
json_object *json = NULL, *json_elem = NULL;
struct bgp_dest *dest;
const struct prefix *p;
- char buf[PREFIX2STR_BUFFER];
struct lp_fifo *item, *next;
int count;
@@ -893,12 +888,11 @@ DEFUN(show_bgp_labelpool_requests, show_bgp_labelpool_requests_cmd,
vty_out(vty, "INVALID\n");
} else {
p = bgp_dest_get_prefix(dest);
- prefix2str(p, buf, sizeof(buf));
if (uj)
- json_object_string_add(json_elem,
- "prefix", buf);
+ json_object_string_addf(
+ json_elem, "prefix", "%pFX", p);
else
- vty_out(vty, "%-18s\n", buf);
+ vty_out(vty, "%-18pFX\n", p);
}
break;
case LP_TYPE_VRF:
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index 7b8f0df2e2..e99c2ba661 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -925,7 +925,7 @@ leak_update(struct bgp *to_bgp, struct bgp_dest *bn,
bgp_aggregate_decrement(to_bgp, p, bpi, afi, safi);
bgp_attr_unintern(&bpi->attr);
bpi->attr = new_attr;
- bpi->uptime = bgp_clock();
+ bpi->uptime = monotime(NULL);
/*
* rewrite labels
diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c
index e1fcc743ec..971b1817c8 100644
--- a/bgpd/bgp_nexthop.c
+++ b/bgpd/bgp_nexthop.c
@@ -64,11 +64,6 @@ int bgp_nexthop_cache_compare(const struct bgp_nexthop_cache *a,
return prefix_cmp(&a->prefix, &b->prefix);
}
-const char *bnc_str(struct bgp_nexthop_cache *bnc, char *buf, int size)
-{
- return prefix2str(&bnc->prefix, buf, size);
-}
-
void bnc_nexthop_free(struct bgp_nexthop_cache *bnc)
{
nexthops_free(bnc->nexthop);
@@ -868,7 +863,7 @@ static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp,
if (!CHECK_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED))
vty_out(vty, " Is not Registered\n");
}
- tbuf = time(NULL) - (bgp_clock() - bnc->last_update);
+ tbuf = time(NULL) - (monotime(NULL) - bnc->last_update);
vty_out(vty, " Last update: %s", ctime(&tbuf));
vty_out(vty, "\n");
diff --git a/bgpd/bgp_nexthop.h b/bgpd/bgp_nexthop.h
index 9d653ef4dc..efad906d0a 100644
--- a/bgpd/bgp_nexthop.h
+++ b/bgpd/bgp_nexthop.h
@@ -161,7 +161,6 @@ extern struct bgp_nexthop_cache *bnc_find(struct bgp_nexthop_cache_head *tree,
uint32_t srte_color,
ifindex_t ifindex);
extern void bnc_nexthop_free(struct bgp_nexthop_cache *bnc);
-extern const char *bnc_str(struct bgp_nexthop_cache *bnc, char *buf, int size);
extern void bgp_scan_init(struct bgp *bgp);
extern void bgp_scan_finish(struct bgp *bgp);
extern void bgp_scan_vty_init(void);
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index 344608fda1..61f1b295ca 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -82,13 +82,10 @@ static int bgp_isvalid_labeled_nexthop(struct bgp_nexthop_cache *bnc)
static void bgp_unlink_nexthop_check(struct bgp_nexthop_cache *bnc)
{
if (LIST_EMPTY(&(bnc->paths)) && !bnc->nht_info) {
- if (BGP_DEBUG(nht, NHT)) {
- char buf[PREFIX2STR_BUFFER];
- zlog_debug("%s: freeing bnc %s(%d)(%u)(%s)", __func__,
- bnc_str(bnc, buf, PREFIX2STR_BUFFER),
- bnc->ifindex, bnc->srte_color,
+ if (BGP_DEBUG(nht, NHT))
+ zlog_debug("%s: freeing bnc %pFX(%d)(%u)(%s)", __func__,
+ &bnc->prefix, bnc->ifindex, bnc->srte_color,
bnc->bgp->name_pretty);
- }
/* only unregister if this is the last nh for this prefix*/
if (!bnc_existing_for_prefix(bnc))
unregister_zebra_rnh(bnc);
@@ -261,24 +258,17 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
if (!bnc) {
bnc = bnc_new(tree, &p, srte_color, ifindex);
bnc->bgp = bgp_nexthop;
- if (BGP_DEBUG(nht, NHT)) {
- char buf[PREFIX2STR_BUFFER];
-
- zlog_debug("Allocated bnc %s(%d)(%u)(%s) peer %p",
- bnc_str(bnc, buf, PREFIX2STR_BUFFER),
- bnc->ifindex, bnc->srte_color,
+ if (BGP_DEBUG(nht, NHT))
+ zlog_debug("Allocated bnc %pFX(%d)(%u)(%s) peer %p",
+ &bnc->prefix, bnc->ifindex, bnc->srte_color,
bnc->bgp->name_pretty, peer);
- }
} else {
- if (BGP_DEBUG(nht, NHT)) {
- char buf[PREFIX2STR_BUFFER];
-
+ if (BGP_DEBUG(nht, NHT))
zlog_debug(
- "Found existing bnc %s(%d)(%s) flags 0x%x ifindex %d #paths %d peer %p",
- bnc_str(bnc, buf, PREFIX2STR_BUFFER),
- bnc->ifindex, bnc->bgp->name_pretty, bnc->flags,
- bnc->ifindex, bnc->path_count, bnc->nht_info);
- }
+ "Found existing bnc %pFX(%d)(%s) flags 0x%x ifindex %d #paths %d peer %p",
+ &bnc->prefix, bnc->ifindex,
+ bnc->bgp->name_pretty, bnc->flags, bnc->ifindex,
+ bnc->path_count, bnc->nht_info);
}
if (pi && is_route_parent_evpn(pi))
@@ -436,7 +426,7 @@ static void bgp_process_nexthop_update(struct bgp_nexthop_cache *bnc,
int i;
bool evpn_resolved = false;
- bnc->last_update = bgp_clock();
+ bnc->last_update = monotime(NULL);
bnc->change_flags = 0;
/* debug print the input */
@@ -563,16 +553,12 @@ static void bgp_process_nexthop_update(struct bgp_nexthop_cache *bnc,
if (bnc->is_evpn_gwip_nexthop) {
evpn_resolved = bgp_evpn_is_gateway_ip_resolved(bnc);
- if (BGP_DEBUG(nht, NHT)) {
- char buf2[PREFIX2STR_BUFFER];
-
- prefix2str(&bnc->prefix, buf2, sizeof(buf2));
+ if (BGP_DEBUG(nht, NHT))
zlog_debug(
- "EVPN gateway IP %s recursive MAC/IP lookup %s",
- buf2,
+ "EVPN gateway IP %pFX recursive MAC/IP lookup %s",
+ &bnc->prefix,
(evpn_resolved ? "successful"
: "failed"));
- }
if (evpn_resolved) {
bnc->flags |= BGP_NEXTHOP_VALID;
@@ -609,7 +595,7 @@ static void bgp_nht_ifp_table_handle(struct bgp *bgp,
if (bnc->ifindex != ifp->ifindex)
continue;
- bnc->last_update = bgp_clock();
+ bnc->last_update = monotime(NULL);
bnc->change_flags = 0;
/*
@@ -1023,14 +1009,12 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
const struct prefix *p;
if (BGP_DEBUG(nht, NHT)) {
- char buf[PREFIX2STR_BUFFER];
char bnc_buf[BNC_FLAG_DUMP_SIZE];
char chg_buf[BNC_FLAG_DUMP_SIZE];
- bnc_str(bnc, buf, PREFIX2STR_BUFFER);
zlog_debug(
- "NH update for %s(%d)(%u)(%s) - flags %s chgflags %s- evaluate paths",
- buf, bnc->ifindex, bnc->srte_color,
+ "NH update for %pFX(%d)(%u)(%s) - flags %s chgflags %s- evaluate paths",
+ &bnc->prefix, bnc->ifindex, bnc->srte_color,
bnc->bgp->name_pretty,
bgp_nexthop_dump_bnc_flags(bnc, bnc_buf,
sizeof(bnc_buf)),
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index fe1887565e..7daac44946 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -131,11 +131,11 @@ static void bgp_packet_add(struct peer *peer, struct stream *s)
* after it'll get confused
*/
if (!stream_fifo_count_safe(peer->obuf))
- peer->last_sendq_ok = bgp_clock();
+ peer->last_sendq_ok = monotime(NULL);
stream_fifo_push(peer->obuf, s);
- delta = bgp_clock() - peer->last_sendq_ok;
+ delta = monotime(NULL) - peer->last_sendq_ok;
holdtime = atomic_load_explicit(&peer->holdtime,
memory_order_relaxed);
@@ -156,12 +156,12 @@ static void bgp_packet_add(struct peer *peer, struct stream *s)
peer->host);
BGP_EVENT_ADD(peer, TCP_fatal_error);
} else if (delta > (intmax_t)holdtime &&
- bgp_clock() - peer->last_sendq_warn > 5) {
+ monotime(NULL) - peer->last_sendq_warn > 5) {
flog_warn(
EC_BGP_SENDQ_STUCK_WARN,
"%s has not made any SendQ progress for 1 holdtime, peer overloaded?",
peer->host);
- peer->last_sendq_warn = bgp_clock();
+ peer->last_sendq_warn = monotime(NULL);
}
}
}
@@ -2026,7 +2026,7 @@ static int bgp_update_receive(struct peer *peer, bgp_size_t size)
interned in bgp_attr_parse(). */
bgp_attr_unintern_sub(&attr);
- peer->update_time = bgp_clock();
+ peer->update_time = monotime(NULL);
/* Notify BGP Conditional advertisement scanner process */
peer->advmap_table_change = true;
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index 04f955f97a..989b361597 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -3672,7 +3672,7 @@ struct bgp_path_info *info_make(int type, int sub_type, unsigned short instance,
new->sub_type = sub_type;
new->peer = peer;
new->attr = attr;
- new->uptime = bgp_clock();
+ new->uptime = monotime(NULL);
new->net = dest;
return new;
}
@@ -4062,7 +4062,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
/* If the update is implicit withdraw. */
if (pi) {
- pi->uptime = bgp_clock();
+ pi->uptime = monotime(NULL);
same_attr = attrhash_cmp(pi->attr, attr_new);
hook_call(bgp_process, bgp, afi, safi, dest, peer, true);
@@ -5995,7 +5995,7 @@ void bgp_static_update(struct bgp *bgp, const struct prefix *p,
#endif
bgp_attr_unintern(&pi->attr);
pi->attr = attr_new;
- pi->uptime = bgp_clock();
+ pi->uptime = monotime(NULL);
#ifdef ENABLE_BGP_VNC
if ((afi == AFI_IP || afi == AFI_IP6)
&& (safi == SAFI_UNICAST)) {
@@ -6297,7 +6297,7 @@ static void bgp_static_update_safi(struct bgp *bgp, const struct prefix *p,
bgp_aggregate_decrement(bgp, p, pi, afi, safi);
bgp_attr_unintern(&pi->attr);
pi->attr = attr_new;
- pi->uptime = bgp_clock();
+ pi->uptime = monotime(NULL);
#ifdef ENABLE_BGP_VNC
if (pi->extra)
label = decode_label(&pi->extra->label[0]);
@@ -8521,7 +8521,7 @@ void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
bgp, p, bpi, afi, SAFI_UNICAST);
bgp_attr_unintern(&bpi->attr);
bpi->attr = new_attr;
- bpi->uptime = bgp_clock();
+ bpi->uptime = monotime(NULL);
/* Process change. */
bgp_aggregate_increment(bgp, p, bpi, afi,
@@ -9456,9 +9456,7 @@ void route_vty_out_tmp(struct vty *vty, struct bgp_dest *dest,
json_object_boolean_true_add(json_status, ">");
json_object_object_add(json_net, "appliedStatusSymbols",
json_status);
-
- prefix2str(p, buff, PREFIX_STRLEN);
- json_object_object_add(json_ar, buff, json_net);
+ json_object_object_addf(json_ar, json_net, "%pFX", p);
} else
vty_out(vty, "\n");
}
@@ -10052,7 +10050,7 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
vty_out(vty, " Gateway IP %s", gwip_buf);
}
- if (safi == SAFI_EVPN)
+ if (safi == SAFI_EVPN && !json_path)
vty_out(vty, "\n");
/* Line1 display AS-path, Aggregator */
@@ -10808,7 +10806,7 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
}
/* Line 9 display Uptime */
- tbuf = time(NULL) - (bgp_clock() - path->uptime);
+ tbuf = time(NULL) - (monotime(NULL) - path->uptime);
if (json_paths) {
json_last_update = json_object_new_object();
json_object_int_add(json_last_update, "epoch", tbuf);
@@ -11545,7 +11543,6 @@ void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp,
has_valid_label = bgp_is_valid_label(&label);
if (safi == SAFI_EVPN) {
-
if (!json) {
vty_out(vty, "BGP routing table entry for %s%s%pFX\n",
prd ? prefix_rd2str(prd, buf1, sizeof(buf1))
diff --git a/bgpd/bgp_snmp.c b/bgpd/bgp_snmp.c
index e25d8d90db..6bc313464a 100644
--- a/bgpd/bgp_snmp.c
+++ b/bgpd/bgp_snmp.c
@@ -588,7 +588,7 @@ static uint8_t *bgpPeerTable(struct variable *v, oid name[], size_t *length,
if (peer->uptime == 0)
return SNMP_INTEGER(0);
else
- return SNMP_INTEGER(bgp_clock() - peer->uptime);
+ return SNMP_INTEGER(monotime(NULL) - peer->uptime);
case BGPPEERCONNECTRETRYINTERVAL:
*write_method = write_bgpPeerTable;
return SNMP_INTEGER(peer->v_connect);
@@ -615,7 +615,7 @@ static uint8_t *bgpPeerTable(struct variable *v, oid name[], size_t *length,
if (peer->update_time == 0)
return SNMP_INTEGER(0);
else
- return SNMP_INTEGER(bgp_clock() - peer->update_time);
+ return SNMP_INTEGER(monotime(NULL) - peer->update_time);
default:
return NULL;
}
diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c
index f1173941a0..3a974910fa 100644
--- a/bgpd/bgp_updgrp.c
+++ b/bgpd/bgp_updgrp.c
@@ -70,14 +70,14 @@
static void update_group_checkin(struct update_group *updgrp)
{
updgrp->id = ++bm->updgrp_idspace;
- updgrp->uptime = bgp_clock();
+ updgrp->uptime = monotime(NULL);
}
static void update_subgroup_checkin(struct update_subgroup *subgrp,
struct update_group *updgrp)
{
subgrp->id = ++bm->subgrp_idspace;
- subgrp->uptime = bgp_clock();
+ subgrp->uptime = monotime(NULL);
}
static void sync_init(struct update_subgroup *subgrp,
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index 0eba5ea447..cfa6614566 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -9573,10 +9573,8 @@ DEFPY (show_bgp_srv6,
vty_out(vty, "locator_name: %s\n", bgp->srv6_locator_name);
vty_out(vty, "locator_chunks:\n");
- for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, chunk)) {
- prefix2str(&chunk->prefix, buf, sizeof(buf));
- vty_out(vty, "- %s\n", buf);
- }
+ for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, chunk))
+ vty_out(vty, "- %pFX\n", &chunk->prefix);
vty_out(vty, "functions:\n");
for (ALL_LIST_ELEMENTS_RO(bgp->srv6_functions, node, func)) {
@@ -12723,7 +12721,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
if (peer_established(p)) {
time_t uptime;
- uptime = bgp_clock();
+ uptime = monotime(NULL);
uptime -= p->uptime;
epoch_tbuf = time(NULL) - uptime;
@@ -12751,7 +12749,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
time_t uptime;
struct tm tm;
- uptime = bgp_clock();
+ uptime = monotime(NULL);
uptime -= p->readtime;
gmtime_r(&uptime, &tm);
@@ -12759,7 +12757,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
(tm.tm_sec * 1000) + (tm.tm_min * 60000)
+ (tm.tm_hour * 3600000));
- uptime = bgp_clock();
+ uptime = monotime(NULL);
uptime -= p->last_write;
gmtime_r(&uptime, &tm);
@@ -12767,7 +12765,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
(tm.tm_sec * 1000) + (tm.tm_min * 60000)
+ (tm.tm_hour * 3600000));
- uptime = bgp_clock();
+ uptime = monotime(NULL);
uptime -= p->update_time;
gmtime_r(&uptime, &tm);
@@ -14039,7 +14037,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
time_t uptime;
struct tm tm;
- uptime = bgp_clock();
+ uptime = monotime(NULL);
uptime -= p->resettime;
gmtime_r(&uptime, &tm);
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index c17bd76ad7..036bbbd6b6 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -531,17 +531,6 @@ void bgp_cluster_id_unset(struct bgp *bgp)
}
}
-/* time_t value that is monotonicly increasing
- * and uneffected by adjustments to system clock
- */
-time_t bgp_clock(void)
-{
- struct timeval tv;
-
- monotime(&tv);
- return tv.tv_sec;
-}
-
/* BGP timer configuration. */
void bgp_timers_set(struct bgp *bgp, uint32_t keepalive, uint32_t holdtime,
uint32_t connect_retry, uint32_t delayopen)
@@ -1760,7 +1749,7 @@ struct peer *peer_create(union sockunion *su, const char *conf_if,
}
/* Last read and reset time set */
- peer->readtime = peer->resettime = bgp_clock();
+ peer->readtime = peer->resettime = monotime(NULL);
/* Default TTL set. */
peer->ttl = (peer->sort == BGP_PEER_IBGP) ? MAXTTL : BGP_DEFAULT_TTL;
@@ -4017,7 +4006,6 @@ struct peer *peer_lookup_dynamic_neighbor(struct bgp *bgp, union sockunion *su)
struct prefix prefix;
struct prefix *listen_range;
int dncount;
- char buf[PREFIX2STR_BUFFER];
if (!sockunion2hostprefix(su, &prefix))
return NULL;
@@ -4034,21 +4022,19 @@ struct peer *peer_lookup_dynamic_neighbor(struct bgp *bgp, union sockunion *su)
if (!gbgp)
return NULL;
- prefix2str(&prefix, buf, sizeof(buf));
-
if (bgp_debug_neighbor_events(NULL))
zlog_debug(
- "Dynamic Neighbor %s matches group %s listen range %pFX",
- buf, group->name, listen_range);
+ "Dynamic Neighbor %pFX matches group %s listen range %pFX",
+ &prefix, group->name, listen_range);
/* Are we within the listen limit? */
dncount = gbgp->dynamic_neighbors_count;
if (dncount >= gbgp->dynamic_neighbors_limit) {
if (bgp_debug_neighbor_events(NULL))
- zlog_debug("Dynamic Neighbor %s rejected - at limit %d",
- inet_sutop(su, buf),
- gbgp->dynamic_neighbors_limit);
+ zlog_debug(
+ "Dynamic Neighbor %pFX rejected - at limit %d",
+ &prefix, gbgp->dynamic_neighbors_limit);
return NULL;
}
@@ -4056,8 +4042,8 @@ struct peer *peer_lookup_dynamic_neighbor(struct bgp *bgp, union sockunion *su)
if (CHECK_FLAG(group->conf->flags, PEER_FLAG_SHUTDOWN)) {
if (bgp_debug_neighbor_events(NULL))
zlog_debug(
- "Dynamic Neighbor %s rejected - group %s disabled",
- buf, group->name);
+ "Dynamic Neighbor %pFX rejected - group %s disabled",
+ &prefix, group->name);
return NULL;
}
@@ -4065,8 +4051,8 @@ struct peer *peer_lookup_dynamic_neighbor(struct bgp *bgp, union sockunion *su)
if (!peer_group_af_configured(group)) {
if (bgp_debug_neighbor_events(NULL))
zlog_debug(
- "Dynamic Neighbor %s rejected - no AF activated for group %s",
- buf, group->name);
+ "Dynamic Neighbor %pFX rejected - no AF activated for group %s",
+ &prefix, group->name);
return NULL;
}
@@ -7960,7 +7946,7 @@ char *peer_uptime(time_t uptime2, char *buf, size_t len, bool use_json,
}
/* Get current time. */
- uptime1 = bgp_clock();
+ uptime1 = monotime(NULL);
uptime1 -= uptime2;
gmtime_r(&uptime1, &tm);
@@ -8002,7 +7988,7 @@ void bgp_master_init(struct thread_master *master, const int buffer_size,
bm->port = BGP_PORT_DEFAULT;
bm->addresses = addresses;
bm->master = master;
- bm->start_time = bgp_clock();
+ bm->start_time = monotime(NULL);
bm->t_rmap_update = NULL;
bm->rmap_update_timer = RMAP_DEFAULT_UPDATE_TIMER;
bm->v_update_delay = BGP_UPDATE_DELAY_DEF;
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 8348b37b8e..dc7ad32a50 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -2043,7 +2043,6 @@ extern unsigned int multipath_num;
/* Prototypes. */
extern void bgp_terminate(void);
extern void bgp_reset(void);
-extern time_t bgp_clock(void);
extern void bgp_zclient_reset(void);
extern struct bgp *bgp_get_default(void);
extern struct bgp *bgp_lookup(as_t, const char *);
@@ -2449,7 +2448,7 @@ static inline int peer_group_af_configured(struct peer_group *group)
static inline char *timestamp_string(time_t ts)
{
time_t tbuf;
- tbuf = time(NULL) - (bgp_clock() - ts);
+ tbuf = time(NULL) - (monotime(NULL) - ts);
return ctime(&tbuf);
}
diff --git a/bgpd/rfapi/bgp_rfapi_cfg.c b/bgpd/rfapi/bgp_rfapi_cfg.c
index 2437bd8cfe..831f92996a 100644
--- a/bgpd/rfapi/bgp_rfapi_cfg.c
+++ b/bgpd/rfapi/bgp_rfapi_cfg.c
@@ -94,7 +94,7 @@ DEFINE_QOBJ_TYPE(rfapi_l2_group_cfg);
*/
time_t rfapi_time(time_t *t)
{
- time_t clock = bgp_clock();
+ time_t clock = monotime(NULL);
if (t)
*t = clock;
return clock;
diff --git a/bgpd/rfapi/rfapi.c b/bgpd/rfapi/rfapi.c
index 382886e0bd..a34c10d842 100644
--- a/bgpd/rfapi/rfapi.c
+++ b/bgpd/rfapi/rfapi.c
@@ -1006,7 +1006,7 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */
bgp_aggregate_decrement(bgp, p, bpi, afi, safi);
bgp_attr_unintern(&bpi->attr);
bpi->attr = new_attr;
- bpi->uptime = bgp_clock();
+ bpi->uptime = monotime(NULL);
if (safi == SAFI_MPLS_VPN) {
@@ -1351,8 +1351,7 @@ int rfapi_init_and_open(struct bgp *bgp, struct rfapi_descriptor *rfd,
struct prefix pfx_un;
struct agg_node *rn;
-
- rfapi_time(&rfd->open_time);
+ rfd->open_time = monotime(NULL);
if (rfg->type == RFAPI_GROUP_CFG_VRF)
SET_FLAG(rfd->flags, RFAPI_HD_FLAG_IS_VRF);
@@ -1521,10 +1520,10 @@ rfapi_query_inner(void *handle, struct rfapi_ip_addr *target,
}
rfd->rsp_counter++; /* dedup: identify this generation */
- rfd->rsp_time = rfapi_time(NULL); /* response content dedup */
+ rfd->rsp_time = monotime(NULL); /* response content dedup */
rfd->ftd_last_allowed_time =
- bgp_clock()
- - bgp->rfapi_cfg->rfp_cfg.ftd_advertisement_interval;
+ monotime(NULL) -
+ bgp->rfapi_cfg->rfp_cfg.ftd_advertisement_interval;
if (l2o) {
if (!memcmp(l2o->macaddr.octet, rfapi_ethaddr0.octet,
diff --git a/bgpd/rfapi/rfapi_import.c b/bgpd/rfapi/rfapi_import.c
index 1d42702769..6fb509fd9c 100644
--- a/bgpd/rfapi/rfapi_import.c
+++ b/bgpd/rfapi/rfapi_import.c
@@ -489,7 +489,7 @@ static struct bgp_path_info *rfapiBgpInfoCreate(struct attr *attr,
bgp_path_info_extra_get(new);
if (prd) {
new->extra->vnc.import.rd = *prd;
- rfapi_time(&new->extra->vnc.import.create_time);
+ new->extra->vnc.import.create_time = monotime(NULL);
}
if (label)
encode_label(*label, &new->extra->label[0]);
diff --git a/bgpd/rfapi/rfapi_private.h b/bgpd/rfapi/rfapi_private.h
index bc0e192ae2..8c76e1dd0b 100644
--- a/bgpd/rfapi/rfapi_private.h
+++ b/bgpd/rfapi/rfapi_private.h
@@ -364,6 +364,11 @@ extern int rfapi_extract_l2o(
* compaitibility to old quagga_time call
* time_t value in terms of stabilised absolute time.
* replacement for POSIX time()
+ *
+ * Please do not use this. It is kept only for
+ * Lou's CI, which compiles against some private
+ * bgp code and would fail to compile without it.
+ * Use monotime() instead.
*/
extern time_t rfapi_time(time_t *t);
diff --git a/bgpd/rfapi/rfapi_rib.c b/bgpd/rfapi/rfapi_rib.c
index 9d61ada7db..9e13c48134 100644
--- a/bgpd/rfapi/rfapi_rib.c
+++ b/bgpd/rfapi/rfapi_rib.c
@@ -784,7 +784,7 @@ int rfapiRibPreloadBi(
skiplist_insert(slRibPt, &ori->rk, ori);
}
- ori->last_sent_time = rfapi_time(NULL);
+ ori->last_sent_time = monotime(NULL);
/*
* poke timer
@@ -797,7 +797,7 @@ int rfapiRibPreloadBi(
* Update last sent time for prefix
*/
trn = agg_node_get(rfd->rsp_times[afi], p); /* locks trn */
- trn->info = (void *)(uintptr_t)bgp_clock();
+ trn->info = (void *)(uintptr_t)monotime(NULL);
if (agg_node_get_lock_count(trn) > 1)
agg_unlock_node(trn);
@@ -1089,7 +1089,7 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd,
rfapiFreeBgpTeaOptionChain(ori->tea_options);
ori->tea_options =
rfapiOptionsDup(ri->tea_options);
- ori->last_sent_time = rfapi_time(NULL);
+ ori->last_sent_time = monotime(NULL);
rfapiFreeRfapiVnOptionChain(ori->vn_options);
ori->vn_options =
@@ -1115,7 +1115,7 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd,
ori->lifetime = ri->lifetime;
ori->tea_options =
rfapiOptionsDup(ri->tea_options);
- ori->last_sent_time = rfapi_time(NULL);
+ ori->last_sent_time = monotime(NULL);
ori->vn_options =
rfapiVnOptionsDup(ri->vn_options);
ori->un_options =
@@ -1227,7 +1227,7 @@ callback:
*/
trn = agg_node_get(rfd->rsp_times[afi],
p); /* locks trn */
- trn->info = (void *)(uintptr_t)bgp_clock();
+ trn->info = (void *)(uintptr_t)monotime(NULL);
if (agg_node_get_lock_count(trn) > 1)
agg_unlock_node(trn);
@@ -1376,7 +1376,7 @@ callback:
rfapiRibStartTimer(rfd, ri, rn, 1);
RFAPI_RIB_CHECK_COUNTS(
0, delete_list->count);
- ri->last_sent_time = rfapi_time(NULL);
+ ri->last_sent_time = monotime(NULL);
#if DEBUG_RIB_SL_RD
{
char buf_rd[RD_ADDRSTRLEN];
@@ -1400,7 +1400,7 @@ callback:
rfapiRibStartTimer(rfd, ri_del, rn, 1);
RFAPI_RIB_CHECK_COUNTS(
0, delete_list->count);
- ri->last_sent_time = rfapi_time(NULL);
+ ri->last_sent_time = monotime(NULL);
}
}
} else {
@@ -1849,7 +1849,7 @@ rfapiRibPreload(struct bgp *bgp, struct rfapi_descriptor *rfd,
vnc_zlog_debug_verbose("%s: loading response=%p, use_eth_resolution=%d",
__func__, response, use_eth_resolution);
- new_last_sent_time = rfapi_time(NULL);
+ new_last_sent_time = monotime(NULL);
for (nhp = response; nhp; nhp = nhp_next) {
@@ -2019,7 +2019,7 @@ rfapiRibPreload(struct bgp *bgp, struct rfapi_descriptor *rfd,
ri->lifetime = nhp->lifetime;
ri->vn_options = rfapiVnOptionsDup(nhp->vn_options);
ri->rsp_counter = rfd->rsp_counter;
- ri->last_sent_time = rfapi_time(NULL);
+ ri->last_sent_time = monotime(NULL);
if (need_insert) {
int rc;
@@ -2042,7 +2042,7 @@ rfapiRibPreload(struct bgp *bgp, struct rfapi_descriptor *rfd,
* update this NVE's timestamp for this prefix
*/
trn = agg_node_get(rfd->rsp_times[afi], &pfx); /* locks trn */
- trn->info = (void *)(uintptr_t)bgp_clock();
+ trn->info = (void *)(uintptr_t)monotime(NULL);
if (agg_node_get_lock_count(trn) > 1)
agg_unlock_node(trn);
@@ -2275,7 +2275,7 @@ static int print_rib_sl(int (*fp)(void *, const char *, ...), struct vty *vty,
rfapiFormatAge(ri->last_sent_time, str_age, BUFSIZ);
#else
{
- time_t now = rfapi_time(NULL);
+ time_t now = monotime(NULL);
time_t expire =
ri->last_sent_time + (time_t)ri->lifetime;
/* allow for delayed/async removal */
diff --git a/bgpd/rfapi/rfapi_vty.c b/bgpd/rfapi/rfapi_vty.c
index c8fdadcac9..a8ab618417 100644
--- a/bgpd/rfapi/rfapi_vty.c
+++ b/bgpd/rfapi/rfapi_vty.c
@@ -109,7 +109,7 @@ char *rfapiFormatAge(time_t age, char *buf, size_t len)
{
time_t now, age_adjusted;
- now = rfapi_time(NULL);
+ now = monotime(NULL);
age_adjusted = now - age;
return rfapiFormatSeconds(age_adjusted, buf, len);
diff --git a/debian/control b/debian/control
index e8bf1a8ffa..06c16cc945 100644
--- a/debian/control
+++ b/debian/control
@@ -30,6 +30,7 @@ Build-Depends: bison,
python3-pytest <!nocheck>,
python3-sphinx,
texinfo (>= 4.7),
+ lua5.3 <pkg.frr.lua>,
liblua5.3-dev <pkg.frr.lua>
Standards-Version: 4.5.0.3
Homepage: https://www.frrouting.org/
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index e31bfe7bfa..e6c4076300 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -2490,6 +2490,9 @@ Extended Community Lists
there is no matched entry, deny will be returned. When `extcommunity` is
empty it matches to any routes.
+ Note that the ``internet`` community receives special handling:
+ it matches any community.
+
.. clicmd:: bgp extcommunity-list expanded NAME permit|deny LINE
This command defines a new expanded extcommunity-list. `line` is a string
diff --git a/doc/user/pimv6.rst b/doc/user/pimv6.rst
index 851e58b814..74891c88b9 100644
--- a/doc/user/pimv6.rst
+++ b/doc/user/pimv6.rst
@@ -325,6 +325,9 @@ MLD state
a MLDv2 querier. MLDv1 joins are recorded as "untracked" and shown in the
``NonTrkSeen`` output column.
+.. clicmd:: show ipv6 mld [vrf NAME] groups [json]
+
+ Display MLD group information.
General multicast routing state
-------------------------------
diff --git a/gdb/lib.txt b/gdb/lib.txt
index 913b455ed1..b44c237985 100644
--- a/gdb/lib.txt
+++ b/gdb/lib.txt
@@ -293,3 +293,25 @@ Arguments:
1st: A (struct route_node *) to the top of the route table.
2nd: The (struct route_node *) to walk up from
end
+
+define mq_walk
+ set $mg = (struct memgroup *)$arg0
+
+ while ($mg)
+ printf "showing active allocations in memory group %s\n", $mg->name
+ set $mt = (struct memtype *)$mg->types
+ while ($mt)
+ printf "memstats: %s:%zu\n", $mt->name, $mt->n_alloc
+ set $mt = $mt->next
+ end
+ set $mg = $mg->next
+ end
+
+document mq_walk
+Walk the memory data structures to show what is holding memory.
+
+Arguments:
+1st: A (struct memgroup *) where to start the walk. If you are not
+ sure where to start pass it mg_first, which is a global DS for
+ all memory allocated in FRR
+end
diff --git a/lib/sigevent.c b/lib/sigevent.c
index 0f20bc0270..985bedeb92 100644
--- a/lib/sigevent.c
+++ b/lib/sigevent.c
@@ -134,8 +134,7 @@ int frr_sigevent_process(void)
#ifdef SIGEVENT_BLOCK_SIGNALS
if (sigprocmask(SIG_UNBLOCK, &oldmask, NULL) < 0)
- ;
- return -1;
+ return -1;
#endif /* SIGEVENT_BLOCK_SIGNALS */
return 0;
diff --git a/nhrpd/nhrp_interface.c b/nhrpd/nhrp_interface.c
index 1092ce13a1..4ac30a7d75 100644
--- a/nhrpd/nhrp_interface.c
+++ b/nhrpd/nhrp_interface.c
@@ -165,8 +165,7 @@ static void nhrp_interface_interface_notifier(struct notifier_block *n,
switch (cmd) {
case NOTIFY_INTERFACE_CHANGED:
- nhrp_interface_update_mtu(nifp->ifp, AFI_IP);
- nhrp_interface_update_source(nifp->ifp);
+ nhrp_interface_update_nbma(nifp->ifp, NULL);
break;
case NOTIFY_INTERFACE_ADDRESS_CHANGED:
nifp->nbma = nbmanifp->afi[AFI_IP].addr;
diff --git a/ospf6d/ospf6_abr.c b/ospf6d/ospf6_abr.c
index 5af1139d9b..e9c42bb80c 100644
--- a/ospf6d/ospf6_abr.c
+++ b/ospf6d/ospf6_abr.c
@@ -488,7 +488,12 @@ int ospf6_abr_originate_summary_to_area(struct ospf6_route *route,
zlog_debug(
"Suppressed by range %pFX of area %s",
&range->prefix, route_area->name);
- ospf6_abr_delete_route(summary, summary_table, old);
+ /* The existing summary route could be a range, don't
+ * remove it in this case
+ */
+ if (summary && summary->type != OSPF6_DEST_TYPE_RANGE)
+ ospf6_abr_delete_route(summary, summary_table,
+ old);
return 0;
}
}
diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c
index a6572794aa..7d72487686 100644
--- a/ospfd/ospf_vty.c
+++ b/ospfd/ospf_vty.c
@@ -10118,6 +10118,21 @@ static int ospf_print_vty_helper_dis_rtr_walkcb(struct hash_bucket *bucket,
return HASHWALK_CONTINUE;
}
+static int ospf_print_json_helper_enabled_rtr_walkcb(struct hash_bucket *bucket,
+ void *arg)
+{
+ struct advRtr *rtr = bucket->data;
+ struct json_object *json_rid_array = arg;
+ struct json_object *json_rid;
+
+ json_rid = json_object_new_object();
+
+ json_object_string_addf(json_rid, "routerId", "%pI4", &rtr->advRtrAddr);
+ json_object_array_add(json_rid_array, json_rid);
+
+ return HASHWALK_CONTINUE;
+}
+
static int ospf_show_gr_helper_details(struct vty *vty, struct ospf *ospf,
uint8_t use_vrf, json_object *json,
bool uj, bool detail)
@@ -10237,6 +10252,18 @@ CPP_NOTICE("Remove JSON object commands with keys starting with capital")
if (ospf->active_restarter_cnt)
json_object_int_add(json_vrf, "activeRestarterCnt",
ospf->active_restarter_cnt);
+
+ if (OSPF_HELPER_ENABLE_RTR_COUNT(ospf)) {
+ struct json_object *json_rid_array =
+ json_object_new_array();
+
+ json_object_object_add(json_vrf, "enabledRouterIds",
+ json_rid_array);
+
+ hash_walk(ospf->enable_rtr_list,
+ ospf_print_json_helper_enabled_rtr_walkcb,
+ json_rid_array);
+ }
}
diff --git a/pimd/mtracebis_netlink.c b/pimd/mtracebis_netlink.c
index fe2cb56a26..81e28f2407 100644
--- a/pimd/mtracebis_netlink.c
+++ b/pimd/mtracebis_netlink.c
@@ -92,7 +92,7 @@ int rtnl_open_byproto(struct rtnl_handle *rth, unsigned subscriptions,
rth->local.nl_family);
return -1;
}
- rth->seq = time(NULL);
+ rth->seq = (uint32_t)time(NULL);
return 0;
}
diff --git a/pimd/pim6_mld.c b/pimd/pim6_mld.c
index c5c98d8024..badc25b473 100644
--- a/pimd/pim6_mld.c
+++ b/pimd/pim6_mld.c
@@ -33,12 +33,15 @@
#include "lib/prefix.h"
#include "lib/checksum.h"
#include "lib/thread.h"
+#include "termtable.h"
#include "pimd/pim6_mld.h"
#include "pimd/pim6_mld_protocol.h"
#include "pimd/pim_memory.h"
#include "pimd/pim_instance.h"
#include "pimd/pim_iface.h"
+#include "pimd/pim6_cmd.h"
+#include "pimd/pim_cmd_common.h"
#include "pimd/pim_util.h"
#include "pimd/pim_tib.h"
#include "pimd/pimd.h"
@@ -2246,8 +2249,16 @@ void gm_ifp_update(struct interface *ifp)
return;
}
- if (!pim_ifp->mld)
+ /*
+ * If ipv6 mld is not enabled on interface, do not start mld activities.
+ */
+ if (!pim_ifp->gm_enable)
+ return;
+
+ if (!pim_ifp->mld) {
+ changed = true;
gm_start(ifp);
+ }
gm_ifp = pim_ifp->mld;
if (IPV6_ADDR_CMP(&pim_ifp->ll_lowest, &gm_ifp->cur_ll_lowest))
@@ -2306,8 +2317,6 @@ void gm_group_delete(struct gm_if *gm_ifp)
#include "pimd/pim6_mld_clippy.c"
#endif
-#define MLD_STR "Multicast Listener Discovery\n"
-
static struct vrf *gm_cmd_vrf_lookup(struct vty *vty, const char *vrf_str,
int *err)
{
@@ -2847,6 +2856,125 @@ DEFPY(gm_show_interface_joins,
return vty_json(vty, js);
}
+static void gm_show_groups(struct vty *vty, struct vrf *vrf, bool uj)
+{
+ struct interface *ifp;
+ struct ttable *tt = NULL;
+ char *table;
+ json_object *json = NULL;
+ json_object *json_iface = NULL;
+ json_object *json_group = NULL;
+ json_object *json_groups = NULL;
+ struct pim_instance *pim = vrf->info;
+
+ if (uj) {
+ json = json_object_new_object();
+ json_object_int_add(json, "totalGroups", pim->gm_group_count);
+ json_object_int_add(json, "watermarkLimit",
+ pim->gm_watermark_limit);
+ } else {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "Interface|Group|Version|Uptime");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+
+ vty_out(vty, "Total MLD groups: %u\n", pim->gm_group_count);
+ vty_out(vty, "Watermark warn limit(%s): %u\n",
+ pim->gm_watermark_limit ? "Set" : "Not Set",
+ pim->gm_watermark_limit);
+ }
+
+ /* scan interfaces */
+ FOR_ALL_INTERFACES (vrf, ifp) {
+
+ struct pim_interface *pim_ifp = ifp->info;
+ struct gm_if *gm_ifp;
+ struct gm_sg *sg;
+
+ if (!pim_ifp)
+ continue;
+
+ gm_ifp = pim_ifp->mld;
+ if (!gm_ifp)
+ continue;
+
+ /* scan mld groups */
+ frr_each (gm_sgs, gm_ifp->sgs, sg) {
+
+ if (uj) {
+ json_object_object_get_ex(json, ifp->name,
+ &json_iface);
+
+ if (!json_iface) {
+ json_iface = json_object_new_object();
+ json_object_pim_ifp_add(json_iface,
+ ifp);
+ json_object_object_add(json, ifp->name,
+ json_iface);
+ json_groups = json_object_new_array();
+ json_object_object_add(json_iface,
+ "groups",
+ json_groups);
+ }
+
+ json_group = json_object_new_object();
+ json_object_string_addf(json_group, "group",
+ "%pPAs",
+ &sg->sgaddr.grp);
+
+ json_object_int_add(json_group, "version",
+ pim_ifp->mld_version);
+ json_object_string_addf(json_group, "uptime",
+ "%pTVMs", &sg->created);
+ json_object_array_add(json_groups, json_group);
+ } else {
+ ttable_add_row(tt, "%s|%pPAs|%d|%pTVMs",
+ ifp->name, &sg->sgaddr.grp,
+ pim_ifp->mld_version,
+ &sg->created);
+ }
+ } /* scan gm groups */
+ } /* scan interfaces */
+
+ if (uj)
+ vty_json(vty, json);
+ else {
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
+}
+
+DEFPY(gm_show_mld_groups,
+ gm_show_mld_groups_cmd,
+ "show ipv6 mld [vrf <VRF|all>$vrf_str] groups [json$json]",
+ SHOW_STR
+ IPV6_STR
+ MLD_STR
+ VRF_FULL_CMD_HELP_STR
+ MLD_GROUP_STR
+ JSON_STR)
+{
+ int ret = CMD_SUCCESS;
+ struct vrf *vrf;
+
+ vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
+ if (ret != CMD_SUCCESS)
+ return ret;
+
+ if (vrf)
+ gm_show_groups(vty, vrf, !!json);
+ else
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
+ gm_show_groups(vty, vrf, !!json);
+
+ return CMD_SUCCESS;
+}
+
DEFPY(gm_debug_show,
gm_debug_show_cmd,
"debug show mld interface IFNAME",
@@ -3021,6 +3149,7 @@ void gm_cli_init(void)
install_element(VIEW_NODE, &gm_show_interface_cmd);
install_element(VIEW_NODE, &gm_show_interface_stats_cmd);
install_element(VIEW_NODE, &gm_show_interface_joins_cmd);
+ install_element(VIEW_NODE, &gm_show_mld_groups_cmd);
install_element(VIEW_NODE, &gm_debug_show_cmd);
install_element(INTERFACE_NODE, &gm_debug_iface_cfg_cmd);
diff --git a/pimd/pim_addr.h b/pimd/pim_addr.h
index 2f2ff24675..defe4070cf 100644
--- a/pimd/pim_addr.h
+++ b/pimd/pim_addr.h
@@ -38,6 +38,7 @@ typedef struct in_addr pim_addr;
#define PIM_AF_DBG "pim"
#define PIM_MROUTE_DBG "mroute"
#define PIMREG "pimreg"
+#define GM "IGMP"
#define PIM_ADDR_FUNCNAME(name) ipv4_##name
@@ -64,6 +65,7 @@ typedef struct in6_addr pim_addr;
#define PIM_AF_DBG "pimv6"
#define PIM_MROUTE_DBG "mroute6"
#define PIMREG "pim6reg"
+#define GM "MLD"
#define PIM_ADDR_FUNCNAME(name) ipv6_##name
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index c2453efa06..f0b6037db9 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -1129,11 +1129,11 @@ static void igmp_show_groups(struct pim_instance *pim, struct vty *vty, bool uj)
if (uj) {
json = json_object_new_object();
- json_object_int_add(json, "totalGroups", pim->igmp_group_count);
+ json_object_int_add(json, "totalGroups", pim->gm_group_count);
json_object_int_add(json, "watermarkLimit",
pim->gm_watermark_limit);
} else {
- vty_out(vty, "Total IGMP groups: %u\n", pim->igmp_group_count);
+ vty_out(vty, "Total IGMP groups: %u\n", pim->gm_group_count);
vty_out(vty, "Watermark warn limit(%s): %u\n",
pim->gm_watermark_limit ? "Set" : "Not Set",
pim->gm_watermark_limit);
diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c
index 1d3f5f430a..ff77b856fb 100644
--- a/pimd/pim_cmd_common.c
+++ b/pimd/pim_cmd_common.c
@@ -888,6 +888,8 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json)
{
struct pim_upstream *up;
time_t now = pim_time_monotonic_sec();
+ struct ttable *tt = NULL;
+ char *table = NULL;
json_object *json_group = NULL;
json_object *json_row = NULL;
@@ -895,8 +897,15 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json)
if (!json) {
vty_out(vty, "\n");
- vty_out(vty,
- "Source Group RpfIface RpfAddress RibNextHop Metric Pref\n");
+
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(
+ tt,
+ "Source|Group|RpfIface|RpfAddress|RibNextHop|Metric|Pref");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
}
frr_each (rb_pim_upstream, &pim->upstream_head, up) {
@@ -944,8 +953,8 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json)
json_object_object_add(json_group, src_str, json_row);
} else {
- vty_out(vty,
- "%-15pPAs %-15pPAs %-16s %-15pPA %-15pPAs %6d %4d\n",
+ ttable_add_row(
+ tt, "%pPAs|%pPAs|%s|%pPA|%pPAs|%d|%d",
&up->sg.src, &up->sg.grp, rpf_ifname,
&rpf->rpf_addr,
&rpf->source_nexthop.mrib_nexthop_addr,
@@ -953,14 +962,27 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json)
rpf->source_nexthop.mrib_metric_preference);
}
}
+ /* Dump the generated table. */
+ if (!json) {
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
}
void pim_show_neighbors_secondary(struct pim_instance *pim, struct vty *vty)
{
struct interface *ifp;
+ struct ttable *tt = NULL;
+ char *table = NULL;
- vty_out(vty,
- "Interface Address Neighbor Secondary \n");
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "Interface|Address|Neighbor|Secondary");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
FOR_ALL_INTERFACES (pim->vrf, ifp) {
struct pim_interface *pim_ifp;
@@ -988,12 +1010,16 @@ void pim_show_neighbors_secondary(struct pim_instance *pim, struct vty *vty)
for (ALL_LIST_ELEMENTS_RO(neigh->prefix_list,
prefix_node, p))
- vty_out(vty,
- "%-16s %-15pPAs %-15pPAs %-15pFX\n",
- ifp->name, &ifaddr, &neigh->source_addr,
- p);
+ ttable_add_row(tt, "%s|%pPAs|%pPAs|%pFX",
+ ifp->name, &ifaddr,
+ &neigh->source_addr, p);
}
}
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
}
void pim_show_state(struct pim_instance *pim, struct vty *vty,
@@ -1317,15 +1343,24 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
pim_sgaddr *sg, json_object *json)
{
struct pim_upstream *up;
+ struct ttable *tt = NULL;
+ char *table = NULL;
time_t now;
json_object *json_group = NULL;
json_object *json_row = NULL;
now = pim_time_monotonic_sec();
- if (!json)
- vty_out(vty,
- "Iif Source Group State Uptime JoinTimer RSTimer KATimer RefCnt\n");
+ if (!json) {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(
+ tt,
+ "Iif|Source|Group|State|Uptime|JoinTimer|RSTimer|KATimer|RefCnt");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ }
frr_each (rb_pim_upstream, &pim->upstream_head, up) {
char uptime[10];
@@ -1446,8 +1481,8 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
json_object_int_add(json_row, "sptBit", up->sptbit);
json_object_object_add(json_group, src_str, json_row);
} else {
- vty_out(vty,
- "%-16s%-15pPAs %-15pPAs %-11s %-8s %-9s %-9s %-9s %6d\n",
+ ttable_add_row(tt,
+ "%s|%pPAs|%pPAs|%s|%s|%s|%s|%s|%d",
up->rpf.source_nexthop.interface
? up->rpf.source_nexthop.interface->name
: "Unknown",
@@ -1455,12 +1490,20 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
join_timer, rs_timer, ka_timer, up->ref_count);
}
}
+ /* Dump the generated table. */
+ if (!json) {
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
}
static void pim_show_join_desired_helper(struct pim_instance *pim,
struct vty *vty,
struct pim_upstream *up,
- json_object *json, bool uj)
+ json_object *json, bool uj,
+ struct ttable *tt)
{
json_object *json_group = NULL;
json_object *json_row = NULL;
@@ -1491,45 +1534,68 @@ static void pim_show_join_desired_helper(struct pim_instance *pim,
json_object_object_add(json_group, src_str, json_row);
} else {
- vty_out(vty, "%-15pPAs %-15pPAs %-6s\n", &up->sg.src,
- &up->sg.grp,
- pim_upstream_evaluate_join_desired(pim, up) ? "yes"
- : "no");
+ ttable_add_row(tt, "%pPAs|%pPAs|%s", &up->sg.src, &up->sg.grp,
+ pim_upstream_evaluate_join_desired(pim, up)
+ ? "yes"
+ : "no");
}
}
void pim_show_join_desired(struct pim_instance *pim, struct vty *vty, bool uj)
{
struct pim_upstream *up;
+ struct ttable *tt = NULL;
+ char *table = NULL;
json_object *json = NULL;
if (uj)
json = json_object_new_object();
- else
- vty_out(vty, "Source Group EvalJD\n");
+ else {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "Source|Group|EvalJD");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ }
frr_each (rb_pim_upstream, &pim->upstream_head, up) {
/* scan all interfaces */
- pim_show_join_desired_helper(pim, vty, up, json, uj);
+ pim_show_join_desired_helper(pim, vty, up, json, uj, tt);
}
if (uj)
vty_json(vty, json);
+ else {
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
}
void pim_show_upstream_rpf(struct pim_instance *pim, struct vty *vty, bool uj)
{
struct pim_upstream *up;
+ struct ttable *tt = NULL;
+ char *table = NULL;
json_object *json = NULL;
json_object *json_group = NULL;
json_object *json_row = NULL;
if (uj)
json = json_object_new_object();
- else
- vty_out(vty,
- "Source Group RpfIface RibNextHop RpfAddress \n");
+ else {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt,
+ "Source|Group|RpfIface|RibNextHop|RpfAddress");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ }
frr_each (rb_pim_upstream, &pim->upstream_head, up) {
struct pim_rpf *rpf;
@@ -1571,16 +1637,22 @@ void pim_show_upstream_rpf(struct pim_instance *pim, struct vty *vty, bool uj)
&rpf->rpf_addr);
json_object_object_add(json_group, src_str, json_row);
} else {
- vty_out(vty,
- "%-15pPAs %-15pPAs %-16s %-15pPA %-15pPA\n",
- &up->sg.src, &up->sg.grp, rpf_ifname,
- &rpf->source_nexthop.mrib_nexthop_addr,
- &rpf->rpf_addr);
+ ttable_add_row(tt, "%pPAs|%pPAs|%s|%pPA|%pPA",
+ &up->sg.src, &up->sg.grp, rpf_ifname,
+ &rpf->source_nexthop.mrib_nexthop_addr,
+ &rpf->rpf_addr);
}
}
if (uj)
vty_json(vty, json);
+ else {
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
}
static void pim_show_join_helper(struct vty *vty, struct pim_interface *pim_ifp,
@@ -1755,13 +1827,14 @@ void pim_show_join(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
}
}
-static void pim_show_jp_agg_helper(struct vty *vty, struct interface *ifp,
+static void pim_show_jp_agg_helper(struct interface *ifp,
struct pim_neighbor *neigh,
- struct pim_upstream *up, int is_join)
+ struct pim_upstream *up, int is_join,
+ struct ttable *tt)
{
- vty_out(vty, "%-16s %-15pPAs %-15pPAs %-15pPAs %5s\n", ifp->name,
- &neigh->source_addr, &up->sg.src, &up->sg.grp,
- is_join ? "J" : "P");
+ ttable_add_row(tt, "%s|%pPAs|%pPAs|%pPAs|%s", ifp->name,
+ &neigh->source_addr, &up->sg.src, &up->sg.grp,
+ is_join ? "J" : "P");
}
int pim_show_jp_agg_list_cmd_helper(const char *vrf, struct vty *vty)
@@ -1797,9 +1870,15 @@ void pim_show_jp_agg_list(struct pim_instance *pim, struct vty *vty)
struct pim_jp_agg_group *jag;
struct listnode *js_node;
struct pim_jp_sources *js;
+ struct ttable *tt;
+ char *table;
- vty_out(vty,
- "Interface RPF Nbr Source Group State\n");
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "Interface|RPF Nbr|Source|Group|State");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
FOR_ALL_INTERFACES (pim->vrf, ifp) {
pim_ifp = ifp->info;
@@ -1812,13 +1891,19 @@ void pim_show_jp_agg_list(struct pim_instance *pim, struct vty *vty)
jag_node, jag)) {
for (ALL_LIST_ELEMENTS_RO(jag->sources, js_node,
js)) {
- pim_show_jp_agg_helper(vty, ifp, neigh,
+ pim_show_jp_agg_helper(ifp, neigh,
js->up,
- js->is_join);
+ js->is_join, tt);
}
}
}
}
+
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
}
int pim_show_membership_cmd_helper(const char *vrf, struct vty *vty, bool uj)
@@ -1953,10 +2038,10 @@ void pim_show_membership(struct pim_instance *pim, struct vty *vty, bool uj)
}
}
-static void pim_show_channel_helper(struct pim_instance *pim, struct vty *vty,
+static void pim_show_channel_helper(struct pim_instance *pim,
struct pim_interface *pim_ifp,
struct pim_ifchannel *ch, json_object *json,
- bool uj)
+ bool uj, struct ttable *tt)
{
struct pim_upstream *up = ch->upstream;
json_object *json_group = NULL;
@@ -1999,17 +2084,17 @@ static void pim_show_channel_helper(struct pim_instance *pim, struct vty *vty,
&up->sg.src);
} else {
- vty_out(vty,
- "%-16s %-15pPAs %-15pPAs %-10s %-5s %-10s %-11s %-6s\n",
- ch->interface->name, &up->sg.src, &up->sg.grp,
- pim_macro_ch_lost_assert(ch) ? "yes" : "no",
- pim_macro_chisin_joins(ch) ? "yes" : "no",
- pim_macro_chisin_pim_include(ch) ? "yes" : "no",
- PIM_UPSTREAM_FLAG_TEST_DR_JOIN_DESIRED(up->flags)
- ? "yes"
- : "no",
- pim_upstream_evaluate_join_desired(pim, up) ? "yes"
- : "no");
+ ttable_add_row(tt, "%s|%pPAs|%pPAs|%s|%s|%s|%s|%s",
+ ch->interface->name, &up->sg.src, &up->sg.grp,
+ pim_macro_ch_lost_assert(ch) ? "yes" : "no",
+ pim_macro_chisin_joins(ch) ? "yes" : "no",
+ pim_macro_chisin_pim_include(ch) ? "yes" : "no",
+ PIM_UPSTREAM_FLAG_TEST_DR_JOIN_DESIRED(up->flags)
+ ? "yes"
+ : "no",
+ pim_upstream_evaluate_join_desired(pim, up)
+ ? "yes"
+ : "no");
}
}
@@ -2018,14 +2103,22 @@ void pim_show_channel(struct pim_instance *pim, struct vty *vty, bool uj)
struct pim_interface *pim_ifp;
struct pim_ifchannel *ch;
struct interface *ifp;
-
+ struct ttable *tt = NULL;
json_object *json = NULL;
+ char *table = NULL;
if (uj)
json = json_object_new_object();
- else
- vty_out(vty,
- "Interface Source Group LostAssert Joins PimInclude JoinDesired EvalJD\n");
+ else {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(
+ tt,
+ "Interface|Source|Group|LostAssert|Joins|PimInclude|JoinDesired|EvalJD");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ }
/* scan per-interface (S,G) state */
FOR_ALL_INTERFACES (pim->vrf, ifp) {
@@ -2033,16 +2126,21 @@ void pim_show_channel(struct pim_instance *pim, struct vty *vty, bool uj)
if (!pim_ifp)
continue;
-
RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
/* scan all interfaces */
- pim_show_channel_helper(pim, vty, pim_ifp, ch, json,
- uj);
+ pim_show_channel_helper(pim, pim_ifp, ch, json, uj, tt);
}
}
if (uj)
vty_json(vty, json);
+ else {
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
}
int pim_show_channel_cmd_helper(const char *vrf, struct vty *vty, bool uj)
@@ -3817,7 +3915,6 @@ void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
static void show_mroute_count_per_channel_oil(struct channel_oil *c_oil,
json_object *json,
- struct vty *vty,
struct ttable *tt)
{
json_object *json_group = NULL;
@@ -3885,10 +3982,10 @@ void show_mroute_count(struct pim_instance *pim, struct vty *vty,
/* Print PIM and IGMP route counts */
frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil)
- show_mroute_count_per_channel_oil(c_oil, json, vty, tt);
+ show_mroute_count_per_channel_oil(c_oil, json, tt);
for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, sr))
- show_mroute_count_per_channel_oil(&sr->c_oil, json, vty, tt);
+ show_mroute_count_per_channel_oil(&sr->c_oil, json, tt);
/* Dump the generated table. */
if (!json) {
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index 0fb5e8c6d9..e03e5a2630 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -115,7 +115,7 @@ static int pim_sec_addr_comp(const void *p1, const void *p2)
return 0;
}
-struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,
+struct pim_interface *pim_if_new(struct interface *ifp, bool gm, bool pim,
bool ispimreg, bool is_vxlan_term)
{
struct pim_interface *pim_ifp;
@@ -154,9 +154,7 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,
pim_ifp->pim_enable = pim;
pim_ifp->pim_passive_enable = false;
-#if PIM_IPV == 4
- pim_ifp->gm_enable = igmp;
-#endif
+ pim_ifp->gm_enable = gm;
pim_ifp->gm_join_list = NULL;
pim_ifp->pim_neighbor_list = NULL;
@@ -810,7 +808,7 @@ void pim_if_addr_add_all(struct interface *ifp)
ifp->name);
}
/*
- * PIM or IGMP is enabled on interface, and there is at least one
+ * PIM or IGMP/MLD is enabled on interface, and there is at least one
* address assigned, then try to create a vif_index.
*/
if (pim_ifp->mroute_vif_index < 0) {
diff --git a/pimd/pim_igmp.c b/pimd/pim_igmp.c
index 6ffeeb9657..fdc56fd3f3 100644
--- a/pimd/pim_igmp.c
+++ b/pimd/pim_igmp.c
@@ -1008,12 +1008,11 @@ static void igmp_group_count_incr(struct pim_interface *pim_ifp)
{
uint32_t group_count = listcount(pim_ifp->gm_group_list);
- ++pim_ifp->pim->igmp_group_count;
- if (pim_ifp->pim->igmp_group_count ==
- pim_ifp->pim->gm_watermark_limit) {
+ ++pim_ifp->pim->gm_group_count;
+ if (pim_ifp->pim->gm_group_count == pim_ifp->pim->gm_watermark_limit) {
zlog_warn(
"IGMP group count reached watermark limit: %u(vrf: %s)",
- pim_ifp->pim->igmp_group_count,
+ pim_ifp->pim->gm_group_count,
VRF_LOGNAME(pim_ifp->pim->vrf));
}
@@ -1023,13 +1022,13 @@ static void igmp_group_count_incr(struct pim_interface *pim_ifp)
static void igmp_group_count_decr(struct pim_interface *pim_ifp)
{
- if (pim_ifp->pim->igmp_group_count == 0) {
+ if (pim_ifp->pim->gm_group_count == 0) {
zlog_warn("Cannot decrement igmp group count below 0(vrf: %s)",
VRF_LOGNAME(pim_ifp->pim->vrf));
return;
}
- --pim_ifp->pim->igmp_group_count;
+ --pim_ifp->pim->gm_group_count;
}
void igmp_group_delete(struct gm_group *group)
diff --git a/pimd/pim_instance.h b/pimd/pim_instance.h
index 0da881557c..57bc74efb4 100644
--- a/pimd/pim_instance.h
+++ b/pimd/pim_instance.h
@@ -173,7 +173,7 @@ struct pim_instance {
int gm_socket;
struct thread *t_gm_recv;
- unsigned int igmp_group_count;
+ unsigned int gm_group_count;
unsigned int gm_watermark_limit;
unsigned int keep_alive_time;
unsigned int rp_keep_alive_time;
diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c
index 408e86b698..72b16a5f49 100644
--- a/pimd/pim_nb_config.c
+++ b/pimd/pim_nb_config.c
@@ -348,8 +348,7 @@ static bool is_pim_interface(const struct lyd_node *dnode)
return false;
}
-#if PIM_IPV == 4
-static int pim_cmd_igmp_start(struct interface *ifp)
+static int pim_cmd_gm_start(struct interface *ifp)
{
struct pim_interface *pim_ifp;
uint8_t need_startup = 0;
@@ -377,7 +376,6 @@ static int pim_cmd_igmp_start(struct interface *ifp)
return NB_OK;
}
-#endif /* PIM_IPV == 4 */
/*
* CLI reconfiguration affects the interface level (struct pim_interface).
@@ -2584,7 +2582,6 @@ int lib_interface_gmp_address_family_destroy(struct nb_cb_destroy_args *args)
int lib_interface_gmp_address_family_enable_modify(
struct nb_cb_modify_args *args)
{
-#if PIM_IPV == 4
struct interface *ifp;
bool gm_enable;
struct pim_interface *pim_ifp;
@@ -2600,9 +2597,10 @@ int lib_interface_gmp_address_family_enable_modify(
/* Limiting mcast interfaces to number of VIFs */
if (mcast_if_count == MAXVIFS) {
ifp_name = yang_dnode_get_string(if_dnode, "name");
- snprintf(args->errmsg, args->errmsg_len,
- "Max multicast interfaces(%d) Reached. Could not enable IGMP on interface %s",
- MAXVIFS, ifp_name);
+ snprintf(
+ args->errmsg, args->errmsg_len,
+ "Max multicast interfaces(%d) Reached. Could not enable %s on interface %s",
+ MAXVIFS, GM, ifp_name);
return NB_ERR_VALIDATION;
}
break;
@@ -2614,7 +2612,7 @@ int lib_interface_gmp_address_family_enable_modify(
gm_enable = yang_dnode_get_bool(args->dnode, NULL);
if (gm_enable)
- return pim_cmd_igmp_start(ifp);
+ return pim_cmd_gm_start(ifp);
else {
pim_ifp = ifp->info;
@@ -2626,15 +2624,16 @@ int lib_interface_gmp_address_family_enable_modify(
pim_if_membership_clear(ifp);
+#if PIM_IPV == 4
pim_if_addr_del_all_igmp(ifp);
+#else
+ gm_ifp_teardown(ifp);
+#endif
if (!pim_ifp->pim_enable)
pim_if_delete(ifp);
}
}
-#else
- /* TBD Depends on MLD data structure changes */
-#endif /* PIM_IPV == 4 */
return NB_OK;
}
diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c
index 783c9b97e7..1dce6b3562 100644
--- a/pimd/pim_rp.c
+++ b/pimd/pim_rp.c
@@ -51,6 +51,7 @@
#include "pim_bsm.h"
#include "pim_util.h"
#include "pim_ssm.h"
+#include "termtable.h"
/* Cleanup pim->rpf_hash each node data */
void pim_rp_list_hash_clean(void *data)
@@ -1166,14 +1167,25 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
struct rp_info *rp_info;
struct rp_info *prev_rp_info = NULL;
struct listnode *node;
+ struct ttable *tt = NULL;
+ char *table = NULL;
char source[7];
+ char grp[INET6_ADDRSTRLEN];
json_object *json_rp_rows = NULL;
json_object *json_row = NULL;
- if (!json)
- vty_out(vty,
- "RP address group/prefix-list OIF I am RP Source Group-Type\n");
+ if (!json) {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(
+ tt,
+ "RP address|group/prefix-list|OIF|I am RP|Source|Group-Type");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ }
+
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
continue;
@@ -1243,32 +1255,31 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
json_object_array_add(json_rp_rows, json_row);
} else {
- vty_out(vty, "%-15pPA ", &rp_info->rp.rpf_addr);
-
- if (rp_info->plist)
- vty_out(vty, "%-18s ", rp_info->plist);
- else
- vty_out(vty, "%-18pFX ", &rp_info->group);
-
- if (rp_info->rp.source_nexthop.interface)
- vty_out(vty, "%-16s ",
- rp_info->rp.source_nexthop
- .interface->name);
- else
- vty_out(vty, "%-16s ", "(Unknown)");
-
- if (rp_info->i_am_rp)
- vty_out(vty, "yes");
- else
- vty_out(vty, "no");
-
- vty_out(vty, "%14s", source);
- vty_out(vty, "%6s\n", group_type);
+ prefix2str(&rp_info->group, grp, sizeof(grp));
+ ttable_add_row(tt, "%pPA|%s|%s|%s|%s|%s",
+ &rp_info->rp.rpf_addr,
+ rp_info->plist
+ ? rp_info->plist
+ : grp,
+ rp_info->rp.source_nexthop.interface
+ ? rp_info->rp.source_nexthop
+ .interface->name
+ : "Unknown",
+ rp_info->i_am_rp
+ ? "yes"
+ : "no",
+ source, group_type);
}
prev_rp_info = rp_info;
}
- if (json) {
+ /* Dump the generated table. */
+ if (!json) {
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ } else {
if (prev_rp_info && json_rp_rows)
json_object_object_addf(json, json_rp_rows, "%pPA",
&prev_rp_info->rp.rpf_addr);
diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c
index 3d5d68b1f4..cfbd436981 100644
--- a/pimd/pim_vty.c
+++ b/pimd/pim_vty.c
@@ -370,6 +370,12 @@ static int gm_config_write(struct vty *vty, int writes,
static int gm_config_write(struct vty *vty, int writes,
struct pim_interface *pim_ifp)
{
+ /* IF ipv6 mld */
+ if (pim_ifp->gm_enable) {
+ vty_out(vty, " ipv6 mld\n");
+ ++writes;
+ }
+
if (pim_ifp->mld_version != MLD_DEFAULT_VERSION)
vty_out(vty, " ipv6 mld version %d\n", pim_ifp->mld_version);
if (pim_ifp->gm_default_query_interval != IGMP_GENERAL_QUERY_INTERVAL)
diff --git a/ripd/ripd.c b/ripd/ripd.c
index 9798186036..c3a9369a06 100644
--- a/ripd/ripd.c
+++ b/ripd/ripd.c
@@ -996,6 +996,7 @@ static size_t rip_auth_md5_ah_write(struct stream *s, struct rip_interface *ri,
struct key *key)
{
size_t doff = 0;
+ static uint32_t seq = 0;
assert(s && ri && ri->auth_type == RIP_AUTH_MD5);
@@ -1028,7 +1029,7 @@ static size_t rip_auth_md5_ah_write(struct stream *s, struct rip_interface *ri,
/* RFC2080: The value used in the sequence number is
arbitrary, but two suggestions are the time of the
message's creation or a simple message counter. */
- stream_putl(s, time(NULL));
+ stream_putl(s, ++seq);
/* Reserved field must be zero. */
stream_putl(s, 0);
diff --git a/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py b/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py
index c8cdc7ec5c..4d7f436eac 100644
--- a/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py
+++ b/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py
@@ -22,6 +22,7 @@
Following tests are covered.
1. Verify default-originate route with default static and network command
2. Verify default-originate route with aggregate summary command
+3. Verify default-originate behaviour in ECMP
"""
import os
import sys
@@ -48,7 +49,10 @@ from lib.bgp import (
from lib.common_config import (
verify_fib_routes,
step,
+ create_prefix_lists,
run_frr_cmd,
+ create_route_maps,
+ shutdown_bringup_interface,
get_frr_ipv6_linklocal,
start_topology,
apply_raw_config,
@@ -296,6 +300,78 @@ def verify_the_uptime(time_stamp_before, time_stamp_after, incremented=None):
return True
+def get_best_path_route_in_FIB(tgen, topo, dut, network):
+ """
+ API to verify the best route in FIB and return the ipv4 and ipv6 nexthop for the given route
+ command
+ =======
+ show ip bgp json
+ show ip bgp ipv6 unicast json
+ params
+ ======
+ dut : device under test
+ network : route (ip) for which the best route is to be retrieved
+ Returns
+ ========
+ on success : return dict with next hops for the best hop
+ on failure : return error message with boolean False
+ """
+ is_ipv4_best_path_found = False
+ is_ipv6_best_path_found = False
+ rnode = tgen.routers()[dut]
+ ipv4_show_bgp_json = run_frr_cmd(rnode, "sh ip bgp json ", isjson=True)
+ ipv6_show_bgp_json = run_frr_cmd(
+ rnode, "sh ip bgp ipv6 unicast json ", isjson=True
+ )
+ output_dict = {"ipv4": None, "ipv6": None}
+ ipv4_nxt_hop_count = len(ipv4_show_bgp_json["routes"][network["ipv4"]])
+ for index in range(ipv4_nxt_hop_count):
+ if "bestpath" in ipv4_show_bgp_json["routes"][network["ipv4"]][index].keys():
+ best_path_ip = ipv4_show_bgp_json["routes"][network["ipv4"]][index][
+ "nexthops"
+ ][0]["ip"]
+ output_dict["ipv4"] = best_path_ip
+ logger.info(
+ "[DUT [{}]] Best path for the route {} is {} ".format(
+ dut, network["ipv4"], best_path_ip
+ )
+ )
+ is_ipv4_best_path_found = True
+ else:
+ logger.error("ERROR....! No Best Path Found in BGP RIB.... FAILED")
+
+ ipv6_nxt_hop_count = len(ipv6_show_bgp_json["routes"][network["ipv6"]])
+ for index in range(ipv6_nxt_hop_count):
+ if "bestpath" in ipv6_show_bgp_json["routes"][network["ipv6"]][index].keys():
+ ip_add_count = len(
+ ipv6_show_bgp_json["routes"][network["ipv6"]][index]["nexthops"]
+ )
+ for i_index in range(ip_add_count):
+ if (
+ "global"
+ in ipv6_show_bgp_json["routes"][network["ipv6"]][index]["nexthops"][
+ i_index
+ ]["scope"]
+ ):
+ best_path_ip = ipv6_show_bgp_json["routes"][network["ipv6"]][index][
+ "nexthops"
+ ][i_index]["ip"]
+ output_dict["ipv6"] = best_path_ip
+ logger.info(
+ "[DUT [{}]] Best path for the route {} is {} ".format(
+ dut, network["ipv6"], best_path_ip
+ )
+ )
+
+ else:
+ logger.error("ERROR....! No Best Path Found in BGP RIB.... FAILED")
+ if is_ipv4_best_path_found:
+ return output_dict
+ else:
+ logger.error("ERROR...! Unable to find the Best Path in the RIB")
+ return False
+
+
#####################################################
#
# Testcases
@@ -1409,6 +1485,326 @@ def test_verify_bgp_default_originate_with_aggregate_summary_p1(request):
write_test_footer(tc_name)
+def test_verify_default_originate_with_2way_ecmp_p2(request):
+ """
+ Summary: "Verify default-originate route with 2 way ECMP and traffic "
+ """
+
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global DEFAULT_ROUTES
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ step("Populating next-hops details")
+ r1_r2_ipv4_neighbor_ips = []
+ r1_r2_ipv6_neighbor_ips = []
+ r1_link = None
+ for index in range(1, 3):
+ r1_link = "r1-link" + str(index)
+ r1_r2_ipv4_neighbor_ips.append(
+ topo["routers"]["r2"]["links"][r1_link]["ipv4"].split("/")[0]
+ )
+ r1_r2_ipv6_neighbor_ips.append(
+ topo["routers"]["r2"]["links"][r1_link]["ipv6"].split("/")[0]
+ )
+
+ step(
+ "Configure default-originate on R1 for all the neighbor of IPv4 and IPv6 peers "
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+ for index in range(2):
+ raw_config = {
+ "r1": {
+ "raw_config": [
+ "router bgp {}".format(local_as),
+ "address-family ipv4 unicast",
+ "neighbor {} default-originate".format(
+ r1_r2_ipv4_neighbor_ips[index]
+ ),
+ "exit-address-family",
+ "address-family ipv6 unicast",
+ "neighbor {} default-originate ".format(
+ r1_r2_ipv6_neighbor_ips[index]
+ ),
+ "exit-address-family",
+ ]
+ }
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "After configuring default-originate command , verify default routes are advertised on R2 "
+ )
+
+ r2_link = None
+ for index in range(1, 3):
+ r2_link = "r2-link" + str(index)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"][r2_link]["ipv4"].split("/")[0]
+ interface = topo["routers"]["r1"]["links"][r2_link]["interface"]
+ ipv6_link_local_nxt_hop = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local_nxt_hop}
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Ping R1 configure IPv4 and IPv6 loopback address from R2")
+ pingaddr = topo["routers"]["r1"]["links"]["lo"]["ipv4"].split("/")[0]
+ router = tgen.gears["r2"]
+ output = router.run("ping -c 4 -w 4 {}".format(pingaddr))
+ assert " 0% packet loss" in output, "Ping R1->R2 FAILED"
+ logger.info("Ping from R1 to R2 ... success")
+
+ step("Shuting up the active route")
+ network = {"ipv4": "0.0.0.0/0", "ipv6": "::/0"}
+ ipv_dict = get_best_path_route_in_FIB(tgen, topo, dut="r2", network=network)
+ dut_links = topo["routers"]["r1"]["links"]
+ active_interface = None
+ for key, values in dut_links.items():
+ ipv4_address = dut_links[key]["ipv4"].split("/")[0]
+ ipv6_address = dut_links[key]["ipv6"].split("/")[0]
+ if ipv_dict["ipv4"] == ipv4_address and ipv_dict["ipv6"] == ipv6_address:
+ active_interface = dut_links[key]["interface"]
+
+ logger.info(
+ "Shutting down the interface {} on router {} ".format(active_interface, "r1")
+ )
+ shutdown_bringup_interface(tgen, "r1", active_interface, False)
+
+ step("Verify the complete convergence to fail after shutting the interface")
+ result = verify_bgp_convergence(tgen, topo, expected=False)
+ assert (
+ result is not True
+ ), " Testcase {} : After shuting down the interface Convergence is expected to be Failed".format(
+ tc_name
+ )
+
+ step(
+ "Verify routes from active best path is not received from r1 after shuting the interface"
+ )
+ r2_link = None
+ for index in range(1, 3):
+ r2_link = "r2-link" + str(index)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"][r2_link]["ipv4"].split("/")[0]
+ interface = topo["routers"]["r1"]["links"][r2_link]["interface"]
+ ipv6_link_local_nxt_hop = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local_nxt_hop}
+ if index == 1:
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ else:
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Ping R1 configure IPv4 and IPv6 loopback address from R2")
+ pingaddr = topo["routers"]["r1"]["links"]["lo"]["ipv4"].split("/")[0]
+ router = tgen.gears["r2"]
+ output = router.run("ping -c 4 -w 4 {}".format(pingaddr))
+ assert " 0% packet loss" in output, "Ping R1->R2 FAILED"
+ logger.info("Ping from R1 to R2 ... success")
+
+ step("No Shuting up the active route")
+
+ shutdown_bringup_interface(tgen, "r1", active_interface, True)
+
+ step("Verify the complete convergence after bringup the interface")
+ result = verify_bgp_convergence(tgen, topo)
+ assert (
+ result is True
+ ), " Testcase {} : After bringing up the interface complete convergence is expected ".format(
+ tc_name
+ )
+
+ step("Verify all the routes are received from r1 after no shuting the interface")
+ r2_link = None
+ for index in range(1, 3):
+ r2_link = "r2-link" + str(index)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"][r2_link]["ipv4"].split("/")[0]
+ interface = topo["routers"]["r1"]["links"][r2_link]["interface"]
+ ipv6_link_local_nxt_hop = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local_nxt_hop}
+ if index == 1:
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ else:
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure IPv4 and IPv6 route-map with deny option on R2 to filter default route 0.0.0.0/0 and 0::0/0"
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ input_dict_3 = {
+ "r2": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": DEFAULT_ROUTES["ipv4"],
+ "action": "permit",
+ }
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": DEFAULT_ROUTES["ipv6"],
+ "action": "permit",
+ }
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict_3 = {
+ "r2": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "deny",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "deny",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Apply route-map IN direction of R2 ( R2-R1) for IPv4 and IPv6 BGP neighbors")
+ r2_link = None
+ for index in range(1, 3):
+ r2_link = "r2-link" + str(index)
+ input_dict_4 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ r2_link: {
+ "route_maps": [
+ {"name": "RMv4", "direction": "in"}
+ ]
+ },
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ r2_link: {
+ "route_maps": [
+ {"name": "RMv6", "direction": "in"}
+ ]
+ },
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("After applying the route-map the routes are not expected in RIB ")
+ r2_link = None
+ for index in range(1, 3):
+ r2_link = "r2-link" + str(index)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"][r2_link]["ipv4"].split("/")[0]
+ interface = topo["routers"]["r1"]["links"][r2_link]["interface"]
+ ipv6_link_local_nxt_hop = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local_nxt_hop}
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/conftest.py b/tests/topotests/conftest.py
index 120a3e82e4..f79ca71a64 100755
--- a/tests/topotests/conftest.py
+++ b/tests/topotests/conftest.py
@@ -363,7 +363,7 @@ def pytest_configure(config):
# Check environment now that we have config
if not diagnose_env(rundir):
- pytest.exit("environment has errors, please read the logs")
+ pytest.exit("environment has errors, please read the logs in %s" % rundir)
@pytest.fixture(autouse=True, scope="session")
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
index c51a187f28..04712eda87 100644
--- a/tests/topotests/lib/topogen.py
+++ b/tests/topotests/lib/topogen.py
@@ -1293,7 +1293,7 @@ def diagnose_env_linux(rundir):
)
continue
- logger.warning("could not find {} in {}".format(fname, frrdir))
+ logger.error("could not find {} in {}".format(fname, frrdir))
ret = False
else:
if fname != "zebra":
diff --git a/tools/frrcommon.sh.in b/tools/frrcommon.sh.in
index 759d498379..b589ced965 100755
--- a/tools/frrcommon.sh.in
+++ b/tools/frrcommon.sh.in
@@ -272,7 +272,7 @@ all_start() {
}
all_stop() {
- local pids reversed
+ local pids reversed need_zebra
daemon_list enabled_daemons disabled_daemons
[ "$1" = "--reallyall" ] && enabled_daemons="$enabled_daemons $disabled_daemons"
@@ -282,13 +282,23 @@ all_stop() {
reversed="$dmninst $reversed"
done
+ # Stop zebra last, after trying to stop the other daemons
for dmninst in $reversed; do
+ if [ "$dmninst" = "zebra" ]; then
+ need_zebra="yes"
+ continue
+ fi
+
daemon_stop "$dmninst" "$1" &
pids="$pids $!"
done
for pid in $pids; do
wait $pid
done
+
+ if [ -n "$need_zebra" ]; then
+ daemon_stop "zebra"
+ fi
}
all_status() {
diff --git a/zebra/kernel_socket.c b/zebra/kernel_socket.c
index cb549339af..4c089ee194 100644
--- a/zebra/kernel_socket.c
+++ b/zebra/kernel_socket.c
@@ -1112,14 +1112,6 @@ void rtm_read(struct rt_msghdr *rtm)
} else
return;
- /*
- * CHANGE: delete the old prefix, we have no further information
- * to specify the route really
- */
- if (rtm->rtm_type == RTM_CHANGE)
- rib_delete(afi, SAFI_UNICAST, VRF_DEFAULT, ZEBRA_ROUTE_KERNEL,
- 0, zebra_flags, &p, NULL, NULL, 0, RT_TABLE_MAIN, 0,
- 0, true);
if (rtm->rtm_type == RTM_GET || rtm->rtm_type == RTM_ADD
|| rtm->rtm_type == RTM_CHANGE)
rib_add(afi, SAFI_UNICAST, VRF_DEFAULT, proto, 0, zebra_flags,
diff --git a/zebra/redistribute.c b/zebra/redistribute.c
index 1a28f8ceec..4a8fe938ed 100644
--- a/zebra/redistribute.c
+++ b/zebra/redistribute.c
@@ -685,15 +685,10 @@ int zebra_add_import_table_entry(struct zebra_vrf *zvrf, struct route_node *rn,
zebra_del_import_table_entry(zvrf, rn, same);
}
- newre = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
- newre->type = ZEBRA_ROUTE_TABLE;
- newre->distance = zebra_import_table_distance[afi][re->table];
- newre->flags = re->flags;
- newre->metric = re->metric;
- newre->mtu = re->mtu;
- newre->table = zvrf->table_id;
- newre->uptime = monotime(NULL);
- newre->instance = re->table;
+ newre = zebra_rib_route_entry_new(
+ 0, ZEBRA_ROUTE_TABLE, re->table, re->flags, re->nhe_id,
+ zvrf->table_id, re->metric, re->mtu,
+ zebra_import_table_distance[afi][re->table], re->tag);
ng = nexthop_group_new();
copy_nexthops(&ng->nexthop, re->nhe->nhg.nexthop, NULL);
diff --git a/zebra/rib.h b/zebra/rib.h
index a40843e27f..dec5b2b8d6 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -178,15 +178,17 @@ struct route_entry {
/* meta-queue structure:
* sub-queue 0: nexthop group objects
* sub-queue 1: EVPN/VxLAN objects
- * sub-queue 2: connected
- * sub-queue 3: kernel
- * sub-queue 4: static
- * sub-queue 5: RIP, RIPng, OSPF, OSPF6, IS-IS, EIGRP, NHRP
- * sub-queue 6: iBGP, eBGP
- * sub-queue 7: any other origin (if any) typically those that
+ * sub-queue 2: Early Route Processing
+ * sub-queue 3: Early Label Processing
+ * sub-queue 4: connected
+ * sub-queue 5: kernel
+ * sub-queue 6: static
+ * sub-queue 7: RIP, RIPng, OSPF, OSPF6, IS-IS, EIGRP, NHRP
+ * sub-queue 8: iBGP, eBGP
+ * sub-queue 9: any other origin (if any) typically those that
* don't generate routes
*/
-#define MQ_SIZE 8
+#define MQ_SIZE 10
struct meta_queue {
struct list *subq[MQ_SIZE];
uint32_t size; /* sum of lengths of all subqueues */
@@ -342,6 +344,12 @@ extern void _route_entry_dump(const char *func, union prefixconstptr pp,
union prefixconstptr src_pp,
const struct route_entry *re);
+struct route_entry *
+zebra_rib_route_entry_new(vrf_id_t vrf_id, int type, uint8_t instance,
+ uint32_t flags, uint32_t nhe_id, uint32_t table_id,
+ uint32_t metric, uint32_t mtu, uint8_t distance,
+ route_tag_t tag);
+
#define ZEBRA_RIB_LOOKUP_ERROR -1
#define ZEBRA_RIB_FOUND_EXACT 0
#define ZEBRA_RIB_FOUND_NOGATE 1
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index 0eab1fa850..e883033d59 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -937,44 +937,38 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id,
afi = AFI_IP6;
if (h->nlmsg_type == RTM_NEWROUTE) {
+ struct route_entry *re;
+ struct nexthop_group *ng = NULL;
+
+ re = zebra_rib_route_entry_new(vrf_id, proto, 0, flags, nhe_id,
+ table, metric, mtu, distance,
+ tag);
+ if (!nhe_id)
+ ng = nexthop_group_new();
if (!tb[RTA_MULTIPATH]) {
- struct nexthop nh = {0};
+ struct nexthop *nexthop, nh;
if (!nhe_id) {
nh = parse_nexthop_unicast(
ns_id, rtm, tb, bh_type, index, prefsrc,
gate, afi, vrf_id);
+
+ nexthop = nexthop_new();
+ *nexthop = nh;
+ nexthop_group_add_sorted(ng, nexthop);
}
- rib_add(afi, SAFI_UNICAST, vrf_id, proto, 0, flags, &p,
- &src_p, &nh, nhe_id, table, metric, mtu,
- distance, tag, startup);
} else {
/* This is a multipath route */
- struct route_entry *re;
- struct nexthop_group *ng = NULL;
struct rtnexthop *rtnh =
(struct rtnexthop *)RTA_DATA(tb[RTA_MULTIPATH]);
- re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
- re->type = proto;
- re->distance = distance;
- re->flags = flags;
- re->metric = metric;
- re->mtu = mtu;
- re->vrf_id = vrf_id;
- re->table = table;
- re->uptime = monotime(NULL);
- re->tag = tag;
- re->nhe_id = nhe_id;
-
if (!nhe_id) {
uint8_t nhop_num;
/* Use temporary list of nexthops; parse
* message payload's nexthops.
*/
- ng = nexthop_group_new();
nhop_num =
parse_multipath_nexthops_unicast(
ns_id, ng, rtm, rtnh, tb,
@@ -989,23 +983,22 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id,
ng = NULL;
}
}
-
- if (nhe_id || ng)
- rib_add_multipath(afi, SAFI_UNICAST, &p,
- &src_p, re, ng, startup);
- else {
- /*
- * I really don't see how this is possible
- * but since we are testing for it let's
- * let the end user know why the route
- * that was just received was swallowed
- * up and forgotten
- */
- zlog_err(
- "%s: %pFX multipath RTM_NEWROUTE has a invalid nexthop group from the kernel",
- __func__, &p);
- XFREE(MTYPE_RE, re);
- }
+ }
+ if (nhe_id || ng)
+ rib_add_multipath(afi, SAFI_UNICAST, &p, &src_p, re, ng,
+ startup);
+ else {
+ /*
+ * I really don't see how this is possible
+ * but since we are testing for it let's
+ * let the end user know why the route
+ * that was just received was swallowed
+ * up and forgotten
+ */
+ zlog_err(
+ "%s: %pFX multipath RTM_NEWROUTE has a invalid nexthop group from the kernel",
+ __func__, &p);
+ XFREE(MTYPE_RE, re);
}
} else {
if (nhe_id) {
diff --git a/zebra/tc_netlink.c b/zebra/tc_netlink.c
index 89ce075454..4fb0241d1d 100644
--- a/zebra/tc_netlink.c
+++ b/zebra/tc_netlink.c
@@ -294,7 +294,7 @@ static ssize_t netlink_tclass_msg_encode(int cmd, struct zebra_dplane_ctx *ctx,
htb_opt.cbuffer = cbuffer;
tc_calc_rate_table(&htb_opt.rate, rtab, mtu);
- tc_calc_rate_table(&htb_opt.ceil, rtab, mtu);
+ tc_calc_rate_table(&htb_opt.ceil, ctab, mtu);
htb_opt.ceil.mpu = htb_opt.rate.mpu = 0;
htb_opt.ceil.overhead = htb_opt.rate.overhead = 0;
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index a578395ef8..761ba789b8 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -2034,7 +2034,7 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
struct nhg_backup_info *bnhg = NULL;
int ret;
vrf_id_t vrf_id;
- struct nhg_hash_entry nhe;
+ struct nhg_hash_entry nhe, *n = NULL;
s = msg;
if (zapi_route_decode(s, &api) < 0) {
@@ -2052,17 +2052,10 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
(int)api.message, api.flags);
/* Allocate new route. */
- re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
- re->type = api.type;
- re->instance = api.instance;
- re->flags = api.flags;
- re->uptime = monotime(NULL);
- re->vrf_id = vrf_id;
-
- if (api.tableid)
- re->table = api.tableid;
- else
- re->table = zvrf->table_id;
+ re = zebra_rib_route_entry_new(
+ vrf_id, api.type, api.instance, api.flags, api.nhgid,
+ api.tableid ? api.tableid : zvrf->table_id, api.metric, api.mtu,
+ api.distance, api.tag);
if (!CHECK_FLAG(api.message, ZAPI_MESSAGE_NHG)
&& (!CHECK_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP)
@@ -2087,9 +2080,6 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
&api.prefix);
}
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_NHG))
- re->nhe_id = api.nhgid;
-
if (!re->nhe_id
&& (!zapi_read_nexthops(client, &api.prefix, api.nexthops,
api.flags, api.message, api.nexthop_num,
@@ -2105,15 +2095,6 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
return;
}
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_DISTANCE))
- re->distance = api.distance;
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_METRIC))
- re->metric = api.metric;
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_TAG))
- re->tag = api.tag;
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_MTU))
- re->mtu = api.mtu;
-
if (CHECK_FLAG(api.message, ZAPI_MESSAGE_OPAQUE)) {
re->opaque =
XMALLOC(MTYPE_RE_OPAQUE,
@@ -2161,9 +2142,10 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
zebra_nhe_init(&nhe, afi, ng->nexthop);
nhe.nhg.nexthop = ng->nexthop;
nhe.backup_info = bnhg;
+ n = zebra_nhe_copy(&nhe, 0);
}
- ret = rib_add_multipath_nhe(afi, api.safi, &api.prefix, src_p,
- re, &nhe, false);
+ ret = rib_add_multipath_nhe(afi, api.safi, &api.prefix, src_p, re, n,
+ false);
/*
* rib_add_multipath_nhe only fails in a couple spots
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 763c92ebb6..6a691a222f 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -2775,14 +2775,13 @@ int dplane_ctx_tc_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op)
{
int ret = EINVAL;
- struct zebra_vrf *zvrf = NULL;
struct zebra_ns *zns = NULL;
ctx->zd_op = op;
ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
/* TODO: init traffic control qdisc */
- zns = zvrf ? zvrf->zns : zebra_ns_lookup(NS_DEFAULT);
+ zns = zebra_ns_lookup(NS_DEFAULT);
dplane_ctx_ns_init(ctx, zns, true);
@@ -3513,7 +3512,7 @@ dplane_route_update_internal(struct route_node *rn,
static enum zebra_dplane_result dplane_tc_update_internal(enum dplane_op_e op)
{
enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
- int ret = EINVAL;
+ int ret;
struct zebra_dplane_ctx *ctx = NULL;
/* Obtain context block */
diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c
index 3010a516b9..9756d9ba08 100644
--- a/zebra/zebra_mpls.c
+++ b/zebra/zebra_mpls.c
@@ -2747,9 +2747,9 @@ static bool ftn_update_nexthop(bool add_p, struct nexthop *nexthop,
return true;
}
-void mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
- struct prefix *prefix, uint8_t route_type,
- unsigned short route_instance)
+void zebra_mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
+ struct prefix *prefix, uint8_t route_type,
+ uint8_t route_instance)
{
struct route_table *table;
struct route_node *rn;
@@ -2882,8 +2882,8 @@ static bool ftn_update_znh(bool add_p, enum lsp_types_t type,
* There are several changes that need to be made, in several zebra
* data structures, so we want to do all the work required at once.
*/
-void mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
- const struct zapi_labels *zl)
+void zebra_mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
+ const struct zapi_labels *zl)
{
int i, counter, ret = 0;
char buf[NEXTHOP_STRLEN];
diff --git a/zebra/zebra_mpls.h b/zebra/zebra_mpls.h
index a114f01339..cf247861f8 100644
--- a/zebra/zebra_mpls.h
+++ b/zebra/zebra_mpls.h
@@ -260,17 +260,30 @@ void zebra_mpls_print_fec(struct vty *vty, struct zebra_vrf *zvrf,
/*
* Handle zapi request to install/uninstall LSP and
* (optionally) FEC-To-NHLFE (FTN) bindings.
+ *
+ * mpls_zapi_labels_process -> Installs for future processing
+ * in the meta-q
+ * zebra_mpls_zapi_labels_process -> called by the meta-q
*/
void mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
const struct zapi_labels *zl);
+void zebra_mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
+ const struct zapi_labels *zl);
/*
* Uninstall all NHLFEs bound to a single FEC.
+ *
+ * mpls_ftn_uninstall -> Called to enqueue into early label processing
+ * via the metaq
+ * zebra_mpls_ftn_uninstall -> Called when we process the meta q
+ * for this item
*/
void mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
struct prefix *prefix, uint8_t route_type,
- unsigned short route_instance);
-
+ uint8_t route_instance);
+void zebra_mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
+ struct prefix *prefix, uint8_t route_type,
+ uint8_t route_instance);
/*
* Install/update a NHLFE for an LSP in the forwarding table. This may be
* a new LSP entry or a new NHLFE for an existing in-label or an update of
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 03bda8cc33..bd7e8bbbd0 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -81,6 +81,8 @@ DEFINE_HOOK(rib_update, (struct route_node * rn, const char *reason),
enum meta_queue_indexes {
META_QUEUE_NHG,
META_QUEUE_EVPN,
+ META_QUEUE_EARLY_ROUTE,
+ META_QUEUE_EARLY_LABEL,
META_QUEUE_CONNECTED,
META_QUEUE_KERNEL,
META_QUEUE_STATIC,
@@ -173,6 +175,29 @@ struct wq_evpn_wrapper {
#define WQ_EVPN_WRAPPER_TYPE_REM_MACIP 0x03
#define WQ_EVPN_WRAPPER_TYPE_REM_VTEP 0x04
+enum wq_label_types {
+ WQ_LABEL_FTN_UNINSTALL,
+ WQ_LABEL_LABELS_PROCESS,
+};
+
+struct wq_label_wrapper {
+ enum wq_label_types type;
+ vrf_id_t vrf_id;
+
+ struct prefix p;
+ enum lsp_types_t ltype;
+ uint8_t route_type;
+ uint8_t route_instance;
+
+ bool add_p;
+ struct zapi_labels zl;
+
+ int afi;
+};
+
+static void rib_addnode(struct route_node *rn, struct route_entry *re,
+ int process);
+
/* %pRN is already a printer for route_nodes that just prints the prefix */
#ifdef _FRR_ATTRIBUTE_PRINTFRR
#pragma FRR printfrr_ext "%pZN" (struct route_node *)
@@ -185,6 +210,10 @@ static const char *subqueue2str(enum meta_queue_indexes index)
return "NHG Objects";
case META_QUEUE_EVPN:
return "EVPN/VxLan Objects";
+ case META_QUEUE_EARLY_ROUTE:
+ return "Early Route Processing";
+ case META_QUEUE_EARLY_LABEL:
+ return "Early Label Handling";
case META_QUEUE_CONNECTED:
return "Connected Routes";
case META_QUEUE_KERNEL:
@@ -2468,6 +2497,33 @@ static void process_subq_nhg(struct listnode *lnode)
XFREE(MTYPE_WQ_WRAPPER, w);
}
+static void process_subq_early_label(struct listnode *lnode)
+{
+ struct wq_label_wrapper *w = listgetdata(lnode);
+ struct zebra_vrf *zvrf;
+
+ if (!w)
+ return;
+
+ zvrf = vrf_info_lookup(w->vrf_id);
+ if (!zvrf) {
+ XFREE(MTYPE_WQ_WRAPPER, w);
+ return;
+ }
+
+ switch (w->type) {
+ case WQ_LABEL_FTN_UNINSTALL:
+ zebra_mpls_ftn_uninstall(zvrf, w->ltype, &w->p, w->route_type,
+ w->route_instance);
+ break;
+ case WQ_LABEL_LABELS_PROCESS:
+ zebra_mpls_zapi_labels_process(w->add_p, zvrf, &w->zl);
+ break;
+ }
+
+ XFREE(MTYPE_WQ_WRAPPER, w);
+}
+
static void process_subq_route(struct listnode *lnode, uint8_t qindex)
{
struct route_node *rnode = NULL;
@@ -2506,6 +2562,460 @@ static void process_subq_route(struct listnode *lnode, uint8_t qindex)
route_unlock_node(rnode);
}
+static void rib_re_nhg_free(struct route_entry *re)
+{
+ if (re->nhe && re->nhe_id) {
+ assert(re->nhe->id == re->nhe_id);
+ route_entry_update_nhe(re, NULL);
+ } else if (re->nhe && re->nhe->nhg.nexthop)
+ nexthops_free(re->nhe->nhg.nexthop);
+
+ nexthops_free(re->fib_ng.nexthop);
+}
+
+struct zebra_early_route {
+ afi_t afi;
+ safi_t safi;
+ struct prefix p;
+ struct prefix_ipv6 src_p;
+ bool src_p_provided;
+ struct route_entry *re;
+ struct nhg_hash_entry *re_nhe;
+ bool startup;
+ bool deletion;
+ bool fromkernel;
+};
+
+static void early_route_memory_free(struct zebra_early_route *ere)
+{
+ if (ere->re_nhe)
+ zebra_nhg_free(ere->re_nhe);
+
+ XFREE(MTYPE_RE, ere->re);
+ XFREE(MTYPE_WQ_WRAPPER, ere);
+}
+
+static void process_subq_early_route_add(struct zebra_early_route *ere)
+{
+ struct route_entry *re = ere->re;
+ struct route_table *table;
+ struct nhg_hash_entry *nhe = NULL;
+ struct route_node *rn;
+ struct route_entry *same = NULL, *first_same = NULL;
+ int same_count = 0;
+ rib_dest_t *dest;
+
+ /* Lookup table. */
+ table = zebra_vrf_get_table_with_table_id(ere->afi, ere->safi,
+ re->vrf_id, re->table);
+ if (!table) {
+ early_route_memory_free(ere);
+ return;
+ }
+
+ if (re->nhe_id > 0) {
+ nhe = zebra_nhg_lookup_id(re->nhe_id);
+
+ if (!nhe) {
+ /*
+ * We've received from the kernel a nexthop id
+ * that we don't have saved yet. More than likely
+ * it has not been processed and is on the
+ * queue to be processed. Let's stop what we
+ * are doing and cause the meta q to be processed
+ * storing this for later.
+ *
+ * This is being done this way because zebra
+			 * runs with the assumption that nexthop groups
+ */
+ flog_err(
+ EC_ZEBRA_TABLE_LOOKUP_FAILED,
+ "Zebra failed to find the nexthop hash entry for id=%u in a route entry %pFX",
+ re->nhe_id, &ere->p);
+
+ early_route_memory_free(ere);
+ return;
+ }
+ } else {
+ /* Lookup nhe from route information */
+ nhe = zebra_nhg_rib_find_nhe(ere->re_nhe, ere->afi);
+ if (!nhe) {
+ char buf2[PREFIX_STRLEN] = "";
+
+ flog_err(
+ EC_ZEBRA_TABLE_LOOKUP_FAILED,
+ "Zebra failed to find or create a nexthop hash entry for %pFX%s%s",
+ &ere->p, ere->src_p_provided ? " from " : "",
+ ere->src_p_provided
+ ? prefix2str(&ere->src_p, buf2,
+ sizeof(buf2))
+ : "");
+
+ early_route_memory_free(ere);
+ return;
+ }
+ }
+
+ /*
+ * Attach the re to the nhe's nexthop group.
+ *
+ * TODO: This will need to change when we start getting IDs from upper
+ * level protocols, as the refcnt might be wrong, since it checks
+ * if old_id != new_id.
+ */
+ route_entry_update_nhe(re, nhe);
+
+ /* Make it sure prefixlen is applied to the prefix. */
+ apply_mask(&ere->p);
+ if (ere->src_p_provided)
+ apply_mask_ipv6(&ere->src_p);
+
+ /* Set default distance by route type. */
+ if (re->distance == 0)
+ re->distance = route_distance(re->type);
+
+ /* Lookup route node.*/
+ rn = srcdest_rnode_get(table, &ere->p,
+ ere->src_p_provided ? &ere->src_p : NULL);
+
+ /*
+	 * If same type of route are installed, treat it as an implicit
+	 * withdraw. If the user has specified the No route replace semantics
+ * for the install don't do a route replace.
+ */
+ RNODE_FOREACH_RE (rn, same) {
+ if (CHECK_FLAG(same->status, ROUTE_ENTRY_REMOVED)) {
+ same_count++;
+ continue;
+ }
+
+ /* Compare various route_entry properties */
+ if (rib_compare_routes(re, same)) {
+ same_count++;
+
+ if (first_same == NULL)
+ first_same = same;
+ }
+ }
+
+ same = first_same;
+
+ if (!ere->startup && (re->flags & ZEBRA_FLAG_SELFROUTE) &&
+ zrouter.asic_offloaded) {
+ if (!same) {
+ if (IS_ZEBRA_DEBUG_RIB)
+ zlog_debug(
+ "prefix: %pRN is a self route where we do not have an entry for it. Dropping this update, it's useless",
+ rn);
+ /*
+ * We are not on startup, this is a self route
+ * and we have asic offload. Which means
+ * we are getting a callback for a entry
+ * that was already deleted to the kernel
+ * but an earlier response was just handed
+ * back. Drop it on the floor
+ */
+ early_route_memory_free(ere);
+ return;
+ }
+ }
+
+ /* If this route is kernel/connected route, notify the dataplane. */
+ if (RIB_SYSTEM_ROUTE(re)) {
+ /* Notify dataplane */
+ dplane_sys_route_add(rn, re);
+ }
+
+ /* Link new re to node.*/
+ if (IS_ZEBRA_DEBUG_RIB) {
+ rnode_debug(
+ rn, re->vrf_id,
+ "Inserting route rn %p, re %p (%s) existing %p, same_count %d",
+ rn, re, zebra_route_string(re->type), same, same_count);
+
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ route_entry_dump(
+ &ere->p,
+ ere->src_p_provided ? &ere->src_p : NULL, re);
+ }
+
+ SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
+ rib_addnode(rn, re, 1);
+
+ /* Free implicit route.*/
+ if (same)
+ rib_delnode(rn, same);
+
+ /* See if we can remove some RE entries that are queued for
+ * removal, but won't be considered in rib processing.
+ */
+ dest = rib_dest_from_rnode(rn);
+ RNODE_FOREACH_RE_SAFE (rn, re, same) {
+ if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED)) {
+ /* If the route was used earlier, must retain it. */
+ if (dest && re == dest->selected_fib)
+ continue;
+
+ if (IS_ZEBRA_DEBUG_RIB)
+ rnode_debug(rn, re->vrf_id,
+ "rn %p, removing unneeded re %p",
+ rn, re);
+
+ rib_unlink(rn, re);
+ }
+ }
+
+ route_unlock_node(rn);
+ if (ere->re_nhe)
+ zebra_nhg_free(ere->re_nhe);
+ XFREE(MTYPE_WQ_WRAPPER, ere);
+}
+
+static void process_subq_early_route_delete(struct zebra_early_route *ere)
+{
+ struct route_table *table;
+ struct route_node *rn;
+ struct route_entry *re;
+ struct route_entry *fib = NULL;
+ struct route_entry *same = NULL;
+ struct nexthop *rtnh;
+ char buf2[INET6_ADDRSTRLEN];
+ rib_dest_t *dest;
+
+ if (ere->src_p_provided)
+ assert(!ere->src_p.prefixlen || ere->afi == AFI_IP6);
+
+ /* Lookup table. */
+ table = zebra_vrf_lookup_table_with_table_id(
+ ere->afi, ere->safi, ere->re->vrf_id, ere->re->table);
+ if (!table) {
+ early_route_memory_free(ere);
+ return;
+ }
+
+ /* Apply mask. */
+ apply_mask(&ere->p);
+ if (ere->src_p_provided)
+ apply_mask_ipv6(&ere->src_p);
+
+ /* Lookup route node. */
+ rn = srcdest_rnode_lookup(table, &ere->p,
+ ere->src_p_provided ? &ere->src_p : NULL);
+ if (!rn) {
+ if (IS_ZEBRA_DEBUG_RIB) {
+ char src_buf[PREFIX_STRLEN];
+ struct vrf *vrf = vrf_lookup_by_id(ere->re->vrf_id);
+
+ if (ere->src_p_provided && ere->src_p.prefixlen)
+ prefix2str(&ere->src_p, src_buf,
+ sizeof(src_buf));
+ else
+ src_buf[0] = '\0';
+
+ zlog_debug("%s[%d]:%pRN%s%s doesn't exist in rib",
+ vrf->name, ere->re->table, rn,
+ (src_buf[0] != '\0') ? " from " : "",
+ src_buf);
+ }
+ early_route_memory_free(ere);
+ return;
+ }
+
+ dest = rib_dest_from_rnode(rn);
+ fib = dest->selected_fib;
+
+ struct nexthop *nh = NULL;
+
+ if (ere->re->nhe)
+ nh = ere->re->nhe->nhg.nexthop;
+
+ /* Lookup same type route. */
+ RNODE_FOREACH_RE (rn, re) {
+ if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED))
+ continue;
+
+ if (re->type != ere->re->type)
+ continue;
+ if (re->instance != ere->re->instance)
+ continue;
+ if (CHECK_FLAG(re->flags, ZEBRA_FLAG_RR_USE_DISTANCE) &&
+ ere->re->distance != re->distance)
+ continue;
+
+ if (re->type == ZEBRA_ROUTE_KERNEL &&
+ re->metric != ere->re->metric)
+ continue;
+ if (re->type == ZEBRA_ROUTE_CONNECT && (rtnh = nh) &&
+ rtnh->type == NEXTHOP_TYPE_IFINDEX && nh) {
+ if (rtnh->ifindex != nh->ifindex)
+ continue;
+ same = re;
+ break;
+ }
+
+ /* Make sure that the route found has the same gateway. */
+ if (ere->re->nhe_id && re->nhe_id == ere->re->nhe_id) {
+ same = re;
+ break;
+ }
+
+ if (nh == NULL) {
+ same = re;
+ break;
+ }
+ for (ALL_NEXTHOPS(re->nhe->nhg, rtnh)) {
+ /*
+ * No guarantee all kernel send nh with labels
+ * on delete.
+ */
+ if (nexthop_same_no_labels(rtnh, nh)) {
+ same = re;
+ break;
+ }
+ }
+
+ if (same)
+ break;
+ }
+ /*
+ * If same type of route can't be found and this message is from
+ * kernel.
+ */
+ if (!same) {
+ /*
+ * In the past(HA!) we could get here because
+ * we were receiving a route delete from the
+ * kernel and we're not marking the proto
+		 * as coming from its appropriate originator.
+ * Now that we are properly noticing the fact
+ * that the kernel has deleted our route we
+ * are not going to get called in this path
+ * I am going to leave this here because
+ * this might still work this way on non-linux
+ * platforms as well as some weird state I have
+ * not properly thought of yet.
+ * If we can show that this code path is
+ * dead then we can remove it.
+ */
+ if (fib && CHECK_FLAG(ere->re->flags, ZEBRA_FLAG_SELFROUTE)) {
+ if (IS_ZEBRA_DEBUG_RIB) {
+ rnode_debug(
+ rn, ere->re->vrf_id,
+ "rn %p, re %p (%s) was deleted from kernel, adding",
+ rn, fib, zebra_route_string(fib->type));
+ }
+ if (zrouter.allow_delete ||
+ CHECK_FLAG(dest->flags, RIB_ROUTE_ANY_QUEUED)) {
+ UNSET_FLAG(fib->status, ROUTE_ENTRY_INSTALLED);
+ /* Unset flags. */
+ for (rtnh = fib->nhe->nhg.nexthop; rtnh;
+ rtnh = rtnh->next)
+ UNSET_FLAG(rtnh->flags,
+ NEXTHOP_FLAG_FIB);
+
+ /*
+ * This is a non FRR route
+ * as such we should mark
+ * it as deleted
+ */
+ dest->selected_fib = NULL;
+ } else {
+ /*
+ * This means someone else, other than Zebra,
+				 * has deleted a Zebra route from the kernel.
+ * We will add it back
+ */
+ rib_install_kernel(rn, fib, NULL);
+ }
+ } else {
+ if (IS_ZEBRA_DEBUG_RIB) {
+ if (nh)
+ rnode_debug(
+ rn, ere->re->vrf_id,
+ "via %s ifindex %d type %d doesn't exist in rib",
+ inet_ntop(afi2family(ere->afi),
+ &nh->gate, buf2,
+ sizeof(buf2)),
+ nh->ifindex, ere->re->type);
+ else
+ rnode_debug(
+ rn, ere->re->vrf_id,
+ "type %d doesn't exist in rib",
+ ere->re->type);
+ }
+ route_unlock_node(rn);
+ early_route_memory_free(ere);
+ return;
+ }
+ }
+
+ if (same) {
+ struct nexthop *tmp_nh;
+
+ if (ere->fromkernel &&
+ CHECK_FLAG(ere->re->flags, ZEBRA_FLAG_SELFROUTE) &&
+ !zrouter.allow_delete) {
+ rib_install_kernel(rn, same, NULL);
+ route_unlock_node(rn);
+
+ early_route_memory_free(ere);
+ return;
+ }
+
+ /* Special handling for IPv4 or IPv6 routes sourced from
+ * EVPN - the nexthop (and associated MAC) need to be
+ * uninstalled if no more refs.
+ */
+ for (ALL_NEXTHOPS(re->nhe->nhg, tmp_nh)) {
+ struct ipaddr vtep_ip;
+
+ if (CHECK_FLAG(tmp_nh->flags, NEXTHOP_FLAG_EVPN)) {
+ memset(&vtep_ip, 0, sizeof(struct ipaddr));
+ if (ere->afi == AFI_IP) {
+ vtep_ip.ipa_type = IPADDR_V4;
+ memcpy(&(vtep_ip.ipaddr_v4),
+ &(tmp_nh->gate.ipv4),
+ sizeof(struct in_addr));
+ } else {
+ vtep_ip.ipa_type = IPADDR_V6;
+ memcpy(&(vtep_ip.ipaddr_v6),
+ &(tmp_nh->gate.ipv6),
+ sizeof(struct in6_addr));
+ }
+ zebra_rib_queue_evpn_route_del(
+ re->vrf_id, &vtep_ip, &ere->p);
+ }
+ }
+
+ /* Notify dplane if system route changes */
+ if (RIB_SYSTEM_ROUTE(re))
+ dplane_sys_route_del(rn, same);
+
+ rib_delnode(rn, same);
+ }
+
+ route_unlock_node(rn);
+
+ early_route_memory_free(ere);
+}
+
+/*
+ * When FRR receives a route we need to match the route up to
+ * nexthop groups. That we also may have just received
+ * place the data on this queue so that this work of finding
+ * the nexthop group entries for the route entry is always
+ * done after the nexthop group has had a chance to be processed
+ */
+static void process_subq_early_route(struct listnode *lnode)
+{
+ struct zebra_early_route *ere = listgetdata(lnode);
+
+ if (ere->deletion)
+ process_subq_early_route_delete(ere);
+ else
+ process_subq_early_route_add(ere);
+}
+
/*
* Examine the specified subqueue; process one entry and return 1 if
* there is a node, return 0 otherwise.
@@ -2525,6 +3035,12 @@ static unsigned int process_subq(struct list *subq,
case META_QUEUE_NHG:
process_subq_nhg(lnode);
break;
+ case META_QUEUE_EARLY_ROUTE:
+ process_subq_early_route(lnode);
+ break;
+ case META_QUEUE_EARLY_LABEL:
+ process_subq_early_label(lnode);
+ break;
case META_QUEUE_CONNECTED:
case META_QUEUE_KERNEL:
case META_QUEUE_STATIC:
@@ -2555,8 +3071,9 @@ static wq_item_status meta_queue_process(struct work_queue *dummy, void *data)
queue_len = dplane_get_in_queue_len();
if (queue_len > queue_limit) {
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug("rib queue: dplane queue len %u, limit %u, retrying",
- queue_len, queue_limit);
+ zlog_debug(
+ "rib queue: dplane queue len %u, limit %u, retrying",
+ queue_len, queue_limit);
/* Ensure that the meta-queue is actually enqueued */
if (work_queue_empty(zrouter.ribq))
@@ -2635,6 +3152,13 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
return 0;
}
+static int early_label_meta_queue_add(struct meta_queue *mq, void *data)
+{
+ listnode_add(mq->subq[META_QUEUE_EARLY_LABEL], data);
+ mq->size++;
+ return 0;
+}
+
static int rib_meta_queue_nhg_ctx_add(struct meta_queue *mq, void *data)
{
struct nhg_ctx *ctx = NULL;
@@ -2718,6 +3242,44 @@ static int mq_add_handler(void *data,
return mq_add_func(zrouter.mq, data);
}
+void mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
+ struct prefix *prefix, uint8_t route_type,
+ uint8_t route_instance)
+{
+ struct wq_label_wrapper *w;
+
+ w = XCALLOC(MTYPE_WQ_WRAPPER, sizeof(struct wq_label_wrapper));
+
+ w->type = WQ_LABEL_FTN_UNINSTALL;
+ w->vrf_id = zvrf->vrf->vrf_id;
+ w->p = *prefix;
+ w->ltype = type;
+ w->route_type = route_type;
+ w->route_instance = route_instance;
+
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug("Early Label Handling for %pFX", prefix);
+
+ mq_add_handler(w, early_label_meta_queue_add);
+}
+
+void mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
+ const struct zapi_labels *zl)
+{
+ struct wq_label_wrapper *w;
+
+ w = XCALLOC(MTYPE_WQ_WRAPPER, sizeof(struct wq_label_wrapper));
+ w->type = WQ_LABEL_LABELS_PROCESS;
+ w->vrf_id = zvrf->vrf->vrf_id;
+ w->add_p = add_p;
+ w->zl = *zl;
+
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug("Early Label Handling: Labels Process");
+
+ mq_add_handler(w, early_label_meta_queue_add);
+}
+
/* Add route_node to work queue and schedule processing */
int rib_queue_add(struct route_node *rn)
{
@@ -2958,7 +3520,6 @@ int zebra_rib_queue_evpn_rem_vtep_del(vrf_id_t vrf_id, vni_t vni,
return mq_add_handler(w, rib_meta_queue_evpn_add);
}
-
/* Create new meta queue.
A destructor function doesn't seem to be necessary here.
*/
@@ -3034,6 +3595,29 @@ static void nhg_meta_queue_free(struct meta_queue *mq, struct list *l,
}
}
+static void early_label_meta_queue_free(struct meta_queue *mq, struct list *l,
+ struct zebra_vrf *zvrf)
+{
+ struct wq_label_wrapper *w;
+ struct listnode *node, *nnode;
+
+ for (ALL_LIST_ELEMENTS(l, node, nnode, w)) {
+ if (zvrf && zvrf->vrf->vrf_id != w->vrf_id)
+ continue;
+
+ switch (w->type) {
+ case WQ_LABEL_FTN_UNINSTALL:
+ case WQ_LABEL_LABELS_PROCESS:
+ break;
+ }
+
+ node->data = NULL;
+ XFREE(MTYPE_WQ_WRAPPER, w);
+ list_delete_node(l, node);
+ mq->size--;
+ }
+}
+
static void rib_meta_queue_free(struct meta_queue *mq, struct list *l,
struct zebra_vrf *zvrf)
{
@@ -3053,6 +3637,22 @@ static void rib_meta_queue_free(struct meta_queue *mq, struct list *l,
}
}
+static void early_route_meta_queue_free(struct meta_queue *mq, struct list *l,
+ struct zebra_vrf *zvrf)
+{
+ struct zebra_early_route *zer;
+ struct listnode *node, *nnode;
+
+ for (ALL_LIST_ELEMENTS(l, node, nnode, zer)) {
+ if (zvrf && zer->re->vrf_id != zvrf->vrf->vrf_id)
+ continue;
+
+ XFREE(MTYPE_RE, zer);
+ node->data = NULL;
+ list_delete_node(l, node);
+ mq->size--;
+ }
+}
void meta_queue_free(struct meta_queue *mq, struct zebra_vrf *zvrf)
{
@@ -3067,6 +3667,12 @@ void meta_queue_free(struct meta_queue *mq, struct zebra_vrf *zvrf)
case META_QUEUE_EVPN:
evpn_meta_queue_free(mq, mq->subq[i], zvrf);
break;
+ case META_QUEUE_EARLY_ROUTE:
+ early_route_meta_queue_free(mq, mq->subq[i], zvrf);
+ break;
+ case META_QUEUE_EARLY_LABEL:
+ early_label_meta_queue_free(mq, mq->subq[i], zvrf);
+ break;
case META_QUEUE_CONNECTED:
case META_QUEUE_KERNEL:
case META_QUEUE_STATIC:
@@ -3210,17 +3816,6 @@ static void rib_addnode(struct route_node *rn,
rib_link(rn, re, process);
}
-static void rib_re_nhg_free(struct route_entry *re)
-{
- if (re->nhe && re->nhe_id) {
- assert(re->nhe->id == re->nhe_id);
- route_entry_update_nhe(re, NULL);
- } else if (re->nhe && re->nhe->nhg.nexthop)
- nexthops_free(re->nhe->nhg.nexthop);
-
- nexthops_free(re->fib_ng.nexthop);
-}
-
/*
* rib_unlink
*
@@ -3426,6 +4021,46 @@ void _route_entry_dump(const char *func, union prefixconstptr pp,
zlog_debug("%s: dump complete", straddr);
}
+static int rib_meta_queue_early_route_add(struct meta_queue *mq, void *data)
+{
+ struct zebra_early_route *ere = data;
+
+ listnode_add(mq->subq[META_QUEUE_EARLY_ROUTE], data);
+ mq->size++;
+
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug(
+ "Route %pFX(%u) queued for processing into sub-queue %s",
+ &ere->p, ere->re->vrf_id,
+ subqueue2str(META_QUEUE_EARLY_ROUTE));
+
+ return 0;
+}
+
+struct route_entry *zebra_rib_route_entry_new(vrf_id_t vrf_id, int type,
+ uint8_t instance, uint32_t flags,
+ uint32_t nhe_id,
+ uint32_t table_id,
+ uint32_t metric, uint32_t mtu,
+ uint8_t distance, route_tag_t tag)
+{
+ struct route_entry *re;
+
+ re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
+ re->type = type;
+ re->instance = instance;
+ re->distance = distance;
+ re->flags = flags;
+ re->metric = metric;
+ re->mtu = mtu;
+ re->table = table_id;
+ re->vrf_id = vrf_id;
+ re->uptime = monotime(NULL);
+ re->tag = tag;
+ re->nhe_id = nhe_id;
+
+ return re;
+}
/*
* Internal route-add implementation; there are a couple of different public
* signatures. Callers in this path are responsible for the memory they
@@ -3441,162 +4076,25 @@ int rib_add_multipath_nhe(afi_t afi, safi_t safi, struct prefix *p,
struct prefix_ipv6 *src_p, struct route_entry *re,
struct nhg_hash_entry *re_nhe, bool startup)
{
- struct nhg_hash_entry *nhe = NULL;
- struct route_table *table;
- struct route_node *rn;
- struct route_entry *same = NULL, *first_same = NULL;
- int ret = 0;
- int same_count = 0;
- rib_dest_t *dest;
+ struct zebra_early_route *ere;
- if (!re || !re_nhe)
+ if (!re)
return -1;
assert(!src_p || !src_p->prefixlen || afi == AFI_IP6);
- /* Lookup table. */
- table = zebra_vrf_get_table_with_table_id(afi, safi, re->vrf_id,
- re->table);
- if (!table)
- return -1;
-
- if (re->nhe_id > 0) {
- nhe = zebra_nhg_lookup_id(re->nhe_id);
-
- if (!nhe) {
- flog_err(
- EC_ZEBRA_TABLE_LOOKUP_FAILED,
- "Zebra failed to find the nexthop hash entry for id=%u in a route entry",
- re->nhe_id);
-
- return -1;
- }
- } else {
- /* Lookup nhe from route information */
- nhe = zebra_nhg_rib_find_nhe(re_nhe, afi);
- if (!nhe) {
- char buf2[PREFIX_STRLEN] = "";
-
- flog_err(
- EC_ZEBRA_TABLE_LOOKUP_FAILED,
- "Zebra failed to find or create a nexthop hash entry for %pFX%s%s",
- p, src_p ? " from " : "",
- src_p ? prefix2str(src_p, buf2, sizeof(buf2))
- : "");
-
- return -1;
- }
- }
-
- /*
- * Attach the re to the nhe's nexthop group.
- *
- * TODO: This will need to change when we start getting IDs from upper
- * level protocols, as the refcnt might be wrong, since it checks
- * if old_id != new_id.
- */
- route_entry_update_nhe(re, nhe);
-
- /* Make it sure prefixlen is applied to the prefix. */
- apply_mask(p);
+ ere = XCALLOC(MTYPE_WQ_WRAPPER, sizeof(*ere));
+ ere->afi = afi;
+ ere->safi = safi;
+ ere->p = *p;
if (src_p)
- apply_mask_ipv6(src_p);
-
- /* Set default distance by route type. */
- if (re->distance == 0)
- re->distance = route_distance(re->type);
-
- /* Lookup route node.*/
- rn = srcdest_rnode_get(table, p, src_p);
-
- /*
- * If same type of route are installed, treat it as a implicit
- * withdraw. If the user has specified the No route replace semantics
- * for the install don't do a route replace.
- */
- RNODE_FOREACH_RE (rn, same) {
- if (CHECK_FLAG(same->status, ROUTE_ENTRY_REMOVED)) {
- same_count++;
- continue;
- }
-
- /* Compare various route_entry properties */
- if (rib_compare_routes(re, same)) {
- same_count++;
-
- if (first_same == NULL)
- first_same = same;
- }
- }
-
- same = first_same;
-
- if (!startup &&
- (re->flags & ZEBRA_FLAG_SELFROUTE) && zrouter.asic_offloaded) {
- if (!same) {
- if (IS_ZEBRA_DEBUG_RIB)
- zlog_debug("prefix: %pRN is a self route where we do not have an entry for it. Dropping this update, it's useless", rn);
- /*
- * We are not on startup, this is a self route
- * and we have asic offload. Which means
- * we are getting a callback for a entry
- * that was already deleted to the kernel
- * but an earlier response was just handed
- * back. Drop it on the floor
- */
- rib_re_nhg_free(re);
-
- XFREE(MTYPE_RE, re);
- return ret;
- }
- }
-
- /* If this route is kernel/connected route, notify the dataplane. */
- if (RIB_SYSTEM_ROUTE(re)) {
- /* Notify dataplane */
- dplane_sys_route_add(rn, re);
- }
-
- /* Link new re to node.*/
- if (IS_ZEBRA_DEBUG_RIB) {
- rnode_debug(rn, re->vrf_id,
- "Inserting route rn %p, re %p (%s) existing %p, same_count %d",
- rn, re, zebra_route_string(re->type), same,
- same_count);
-
- if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- route_entry_dump(p, src_p, re);
- }
-
- SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
- rib_addnode(rn, re, 1);
-
- /* Free implicit route.*/
- if (same) {
- ret = 1;
- rib_delnode(rn, same);
- }
-
- /* See if we can remove some RE entries that are queued for
- * removal, but won't be considered in rib processing.
- */
- dest = rib_dest_from_rnode(rn);
- RNODE_FOREACH_RE_SAFE (rn, re, same) {
- if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED)) {
- /* If the route was used earlier, must retain it. */
- if (dest && re == dest->selected_fib)
- continue;
-
- if (IS_ZEBRA_DEBUG_RIB)
- rnode_debug(rn, re->vrf_id, "rn %p, removing unneeded re %p",
- rn, re);
+ ere->src_p = *src_p;
+ ere->src_p_provided = !!src_p;
+ ere->re = re;
+ ere->re_nhe = re_nhe;
+ ere->startup = startup;
- rib_unlink(rn, re);
- }
- }
-
- route_unlock_node(rn);
- return ret;
+ return mq_add_handler(ere, rib_meta_queue_early_route_add);
}
/*
@@ -3607,7 +4105,7 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
struct nexthop_group *ng, bool startup)
{
int ret;
- struct nhg_hash_entry nhe;
+ struct nhg_hash_entry nhe, *n;
if (!re)
return -1;
@@ -3625,10 +4123,8 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
else if (re->nhe_id > 0)
nhe.id = re->nhe_id;
- ret = rib_add_multipath_nhe(afi, safi, p, src_p, re, &nhe, startup);
-
- /* In this path, the callers expect memory to be freed. */
- nexthop_group_delete(&ng);
+ n = zebra_nhe_copy(&nhe, 0);
+ ret = rib_add_multipath_nhe(afi, safi, p, src_p, re, n, startup);
/* In error cases, free the route also */
if (ret < 0)
@@ -3643,212 +4139,32 @@ void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
uint32_t nhe_id, uint32_t table_id, uint32_t metric,
uint8_t distance, bool fromkernel)
{
- struct route_table *table;
- struct route_node *rn;
- struct route_entry *re;
- struct route_entry *fib = NULL;
- struct route_entry *same = NULL;
- struct nexthop *rtnh;
- char buf2[INET6_ADDRSTRLEN];
- rib_dest_t *dest;
-
- assert(!src_p || !src_p->prefixlen || afi == AFI_IP6);
-
- /* Lookup table. */
- table = zebra_vrf_lookup_table_with_table_id(afi, safi, vrf_id,
- table_id);
- if (!table)
- return;
-
- /* Apply mask. */
- apply_mask(p);
- if (src_p)
- apply_mask_ipv6(src_p);
-
- /* Lookup route node. */
- rn = srcdest_rnode_lookup(table, p, src_p);
- if (!rn) {
- if (IS_ZEBRA_DEBUG_RIB) {
- char src_buf[PREFIX_STRLEN];
- struct vrf *vrf = vrf_lookup_by_id(vrf_id);
-
- if (src_p && src_p->prefixlen)
- prefix2str(src_p, src_buf, sizeof(src_buf));
- else
- src_buf[0] = '\0';
-
- zlog_debug("%s[%d]:%pRN%s%s doesn't exist in rib",
- vrf->name, table_id, rn,
- (src_buf[0] != '\0') ? " from " : "",
- src_buf);
- }
- return;
- }
-
- dest = rib_dest_from_rnode(rn);
- fib = dest->selected_fib;
-
- /* Lookup same type route. */
- RNODE_FOREACH_RE (rn, re) {
- if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED))
- continue;
-
- if (re->type != type)
- continue;
- if (re->instance != instance)
- continue;
- if (CHECK_FLAG(re->flags, ZEBRA_FLAG_RR_USE_DISTANCE) &&
- distance != re->distance)
- continue;
-
- if (re->type == ZEBRA_ROUTE_KERNEL && re->metric != metric)
- continue;
- if (re->type == ZEBRA_ROUTE_CONNECT &&
- (rtnh = re->nhe->nhg.nexthop)
- && rtnh->type == NEXTHOP_TYPE_IFINDEX && nh) {
- if (rtnh->ifindex != nh->ifindex)
- continue;
- same = re;
- break;
- }
-
- /* Make sure that the route found has the same gateway. */
- if (nhe_id && re->nhe_id == nhe_id) {
- same = re;
- break;
- }
-
- if (nh == NULL) {
- same = re;
- break;
- }
- for (ALL_NEXTHOPS(re->nhe->nhg, rtnh)) {
- /*
- * No guarantee all kernel send nh with labels
- * on delete.
- */
- if (nexthop_same_no_labels(rtnh, nh)) {
- same = re;
- break;
- }
- }
-
- if (same)
- break;
- }
- /* If same type of route can't be found and this message is from
- kernel. */
- if (!same) {
- /*
- * In the past(HA!) we could get here because
- * we were receiving a route delete from the
- * kernel and we're not marking the proto
- * as coming from it's appropriate originator.
- * Now that we are properly noticing the fact
- * that the kernel has deleted our route we
- * are not going to get called in this path
- * I am going to leave this here because
- * this might still work this way on non-linux
- * platforms as well as some weird state I have
- * not properly thought of yet.
- * If we can show that this code path is
- * dead then we can remove it.
- */
- if (fib && CHECK_FLAG(flags, ZEBRA_FLAG_SELFROUTE)) {
- if (IS_ZEBRA_DEBUG_RIB) {
- rnode_debug(rn, vrf_id,
- "rn %p, re %p (%s) was deleted from kernel, adding",
- rn, fib,
- zebra_route_string(fib->type));
- }
- if (zrouter.allow_delete ||
- CHECK_FLAG(dest->flags, RIB_ROUTE_ANY_QUEUED)) {
- UNSET_FLAG(fib->status, ROUTE_ENTRY_INSTALLED);
- /* Unset flags. */
- for (rtnh = fib->nhe->nhg.nexthop; rtnh;
- rtnh = rtnh->next)
- UNSET_FLAG(rtnh->flags,
- NEXTHOP_FLAG_FIB);
-
- /*
- * This is a non FRR route
- * as such we should mark
- * it as deleted
- */
- dest->selected_fib = NULL;
- } else {
- /* This means someone else, other than Zebra,
- * has deleted
- * a Zebra router from the kernel. We will add
- * it back */
- rib_install_kernel(rn, fib, NULL);
- }
- } else {
- if (IS_ZEBRA_DEBUG_RIB) {
- if (nh)
- rnode_debug(
- rn, vrf_id,
- "via %s ifindex %d type %d doesn't exist in rib",
- inet_ntop(afi2family(afi),
- &nh->gate, buf2,
- sizeof(buf2)),
- nh->ifindex, type);
- else
- rnode_debug(
- rn, vrf_id,
- "type %d doesn't exist in rib",
- type);
- }
- route_unlock_node(rn);
- return;
- }
- }
-
- if (same) {
- struct nexthop *tmp_nh;
-
- if (fromkernel && CHECK_FLAG(flags, ZEBRA_FLAG_SELFROUTE) &&
- !zrouter.allow_delete) {
- rib_install_kernel(rn, same, NULL);
- route_unlock_node(rn);
-
- return;
- }
-
- /* Special handling for IPv4 or IPv6 routes sourced from
- * EVPN - the nexthop (and associated MAC) need to be
- * uninstalled if no more refs.
- */
- for (ALL_NEXTHOPS(re->nhe->nhg, tmp_nh)) {
- struct ipaddr vtep_ip;
-
- if (CHECK_FLAG(tmp_nh->flags, NEXTHOP_FLAG_EVPN)) {
- memset(&vtep_ip, 0, sizeof(struct ipaddr));
- if (afi == AFI_IP) {
- vtep_ip.ipa_type = IPADDR_V4;
- memcpy(&(vtep_ip.ipaddr_v4),
- &(tmp_nh->gate.ipv4),
- sizeof(struct in_addr));
- } else {
- vtep_ip.ipa_type = IPADDR_V6;
- memcpy(&(vtep_ip.ipaddr_v6),
- &(tmp_nh->gate.ipv6),
- sizeof(struct in6_addr));
- }
- zebra_rib_queue_evpn_route_del(re->vrf_id,
- &vtep_ip, p);
- }
- }
+ struct zebra_early_route *ere;
+ struct route_entry *re = NULL;
+ struct nhg_hash_entry *nhe = NULL;
- /* Notify dplane if system route changes */
- if (RIB_SYSTEM_ROUTE(re))
- dplane_sys_route_del(rn, same);
+ re = zebra_rib_route_entry_new(vrf_id, type, instance, flags, nhe_id,
+ table_id, metric, 0, distance, 0);
- rib_delnode(rn, same);
+ if (nh) {
+ nhe = zebra_nhg_alloc();
+ nhe->nhg.nexthop = nexthop_dup(nh, NULL);
}
- route_unlock_node(rn);
- return;
+ ere = XCALLOC(MTYPE_WQ_WRAPPER, sizeof(*ere));
+ ere->afi = afi;
+ ere->safi = safi;
+ ere->p = *p;
+ if (src_p)
+ ere->src_p = *src_p;
+ ere->src_p_provided = !!src_p;
+ ere->re = re;
+ ere->re_nhe = nhe;
+ ere->startup = false;
+ ere->deletion = true;
+ ere->fromkernel = fromkernel;
+
+ mq_add_handler(ere, rib_meta_queue_early_route_add);
}
@@ -3859,36 +4175,23 @@ int rib_add(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
uint8_t distance, route_tag_t tag, bool startup)
{
struct route_entry *re = NULL;
- struct nexthop *nexthop = NULL;
- struct nexthop_group *ng = NULL;
+ struct nexthop nexthop = {};
+ struct nexthop_group ng = {};
/* Allocate new route_entry structure. */
- re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
- re->type = type;
- re->instance = instance;
- re->distance = distance;
- re->flags = flags;
- re->metric = metric;
- re->mtu = mtu;
- re->table = table_id;
- re->vrf_id = vrf_id;
- re->uptime = monotime(NULL);
- re->tag = tag;
- re->nhe_id = nhe_id;
+ re = zebra_rib_route_entry_new(vrf_id, type, instance, flags, nhe_id,
+ table_id, metric, mtu, distance, tag);
/* If the owner of the route supplies a shared nexthop-group id,
* we'll use that. Otherwise, pass the nexthop along directly.
*/
if (!nhe_id) {
- ng = nexthop_group_new();
-
/* Add nexthop. */
- nexthop = nexthop_new();
- *nexthop = *nh;
- nexthop_group_add_sorted(ng, nexthop);
+ nexthop = *nh;
+ nexthop_group_add_sorted(&ng, &nexthop);
}
- return rib_add_multipath(afi, safi, p, src_p, re, ng, startup);
+ return rib_add_multipath(afi, safi, p, src_p, re, &ng, startup);
}
static const char *rib_update_event2str(enum rib_update_event event)
diff --git a/zebra/zebra_script.c b/zebra/zebra_script.c
index d247f87708..2e2f4159cd 100644
--- a/zebra/zebra_script.c
+++ b/zebra/zebra_script.c
@@ -329,14 +329,6 @@ void lua_pushzebra_dplane_ctx(lua_State *L, const struct zebra_dplane_ctx *ctx)
lua_setfield(L, -2, "ipset");
break;
}
- case DPLANE_OP_ADDR_INSTALL:
- case DPLANE_OP_ADDR_UNINSTALL:
- case DPLANE_OP_INTF_ADDR_ADD:
- case DPLANE_OP_INTF_ADDR_DEL:
- case DPLANE_OP_INTF_INSTALL:
- case DPLANE_OP_INTF_UPDATE:
- case DPLANE_OP_INTF_DELETE:
- break;
case DPLANE_OP_NEIGH_INSTALL:
case DPLANE_OP_NEIGH_UPDATE:
case DPLANE_OP_NEIGH_DELETE:
@@ -418,6 +410,17 @@ void lua_pushzebra_dplane_ctx(lua_State *L, const struct zebra_dplane_ctx *ctx)
}
lua_setfield(L, -2, "gre");
+ case DPLANE_OP_ADDR_INSTALL:
+ case DPLANE_OP_ADDR_UNINSTALL:
+ case DPLANE_OP_INTF_ADDR_ADD:
+ case DPLANE_OP_INTF_ADDR_DEL:
+ case DPLANE_OP_INTF_INSTALL:
+ case DPLANE_OP_INTF_UPDATE:
+ case DPLANE_OP_INTF_DELETE:
+ case DPLANE_OP_TC_INSTALL:
+ case DPLANE_OP_TC_UPDATE:
+ case DPLANE_OP_TC_DELETE:
+ /* Not currently handled */
case DPLANE_OP_INTF_NETCONFIG: /*NYI*/
case DPLANE_OP_NONE:
break;
diff --git a/zebra/zserv.c b/zebra/zserv.c
index f76b29deff..ebe246ffbc 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -228,8 +228,7 @@ static void zserv_write(struct thread *thread)
case BUFFER_ERROR:
goto zwrite_fail;
case BUFFER_PENDING:
- atomic_store_explicit(&client->last_write_time,
- (uint32_t)monotime(NULL),
+ atomic_store_explicit(&client->last_write_time, monotime(NULL),
memory_order_relaxed);
zserv_client_event(client, ZSERV_CLIENT_WRITE);
return;
@@ -264,8 +263,7 @@ static void zserv_write(struct thread *thread)
case BUFFER_ERROR:
goto zwrite_fail;
case BUFFER_PENDING:
- atomic_store_explicit(&client->last_write_time,
- (uint32_t)monotime(NULL),
+ atomic_store_explicit(&client->last_write_time, monotime(NULL),
memory_order_relaxed);
zserv_client_event(client, ZSERV_CLIENT_WRITE);
return;
@@ -276,8 +274,8 @@ static void zserv_write(struct thread *thread)
atomic_store_explicit(&client->last_write_cmd, wcmd,
memory_order_relaxed);
- atomic_store_explicit(&client->last_write_time,
- (uint32_t)monotime(NULL), memory_order_relaxed);
+ atomic_store_explicit(&client->last_write_time, monotime(NULL),
+ memory_order_relaxed);
return;
@@ -748,7 +746,7 @@ static struct zserv *zserv_client_create(int sock)
client->wb = buffer_new(0);
TAILQ_INIT(&(client->gr_info_queue));
- atomic_store_explicit(&client->connect_time, (uint32_t) monotime(NULL),
+ atomic_store_explicit(&client->connect_time, monotime(NULL),
memory_order_relaxed);
/* Initialize flags */
diff --git a/zebra/zserv.h b/zebra/zserv.h
index 9986cc9f7e..db7b70d7c4 100644
--- a/zebra/zserv.h
+++ b/zebra/zserv.h
@@ -216,15 +216,15 @@ struct zserv {
*/
/* monotime of client creation */
- _Atomic uint32_t connect_time;
+ _Atomic uint64_t connect_time;
/* monotime of last message received */
- _Atomic uint32_t last_read_time;
+ _Atomic uint64_t last_read_time;
/* monotime of last message sent */
- _Atomic uint32_t last_write_time;
+ _Atomic uint64_t last_write_time;
/* command code of last message read */
- _Atomic uint32_t last_read_cmd;
+ _Atomic uint64_t last_read_cmd;
/* command code of last message written */
- _Atomic uint32_t last_write_cmd;
+ _Atomic uint64_t last_write_cmd;
/*
* Number of instances configured with