summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--babeld/kernel.c4
-rw-r--r--babeld/kernel.h2
-rw-r--r--bgpd/bgp_advertise.c2
-rw-r--r--bgpd/bgp_damp.c10
-rw-r--r--bgpd/bgp_dump.c2
-rw-r--r--bgpd/bgp_ecommunity.c17
-rw-r--r--bgpd/bgp_ecommunity.h1
-rw-r--r--bgpd/bgp_evpn.c48
-rw-r--r--bgpd/bgp_evpn_mh.c10
-rw-r--r--bgpd/bgp_evpn_vty.c91
-rw-r--r--bgpd/bgp_fsm.c15
-rw-r--r--bgpd/bgp_io.c2
-rw-r--r--bgpd/bgp_labelpool.c24
-rw-r--r--bgpd/bgp_memory.c8
-rw-r--r--bgpd/bgp_memory.h8
-rw-r--r--bgpd/bgp_mplsvpn.c32
-rw-r--r--bgpd/bgp_nexthop.c7
-rw-r--r--bgpd/bgp_nexthop.h1
-rw-r--r--bgpd/bgp_nht.c52
-rw-r--r--bgpd/bgp_packet.c10
-rw-r--r--bgpd/bgp_route.c91
-rw-r--r--bgpd/bgp_snmp.c4
-rw-r--r--bgpd/bgp_updgrp.c16
-rw-r--r--bgpd/bgp_vty.c124
-rw-r--r--bgpd/bgp_zebra.c43
-rw-r--r--bgpd/bgpd.c50
-rw-r--r--bgpd/bgpd.h7
-rw-r--r--bgpd/rfapi/bgp_rfapi_cfg.c2
-rw-r--r--bgpd/rfapi/rfapi.c11
-rw-r--r--bgpd/rfapi/rfapi_import.c4
-rw-r--r--bgpd/rfapi/rfapi_private.h5
-rw-r--r--bgpd/rfapi/rfapi_rib.c22
-rw-r--r--bgpd/rfapi/rfapi_vty.c2
-rw-r--r--configure.ac2
-rw-r--r--debian/control1
-rw-r--r--doc/user/bgp.rst16
-rw-r--r--doc/user/pimv6.rst5
-rw-r--r--gdb/lib.txt22
-rw-r--r--lib/sigevent.c3
-rw-r--r--lib/thread.c5
-rw-r--r--lib/zclient.c16
-rw-r--r--nhrpd/nhrp_interface.c3
-rw-r--r--ospf6d/ospf6_abr.c7
-rw-r--r--ospfd/ospf_lsa.c9
-rw-r--r--ospfd/ospf_vty.c27
-rw-r--r--pimd/mtracebis_netlink.c2
-rw-r--r--pimd/pim6_mld.c164
-rw-r--r--pimd/pim6_mld.h1
-rw-r--r--pimd/pim_addr.h4
-rw-r--r--pimd/pim_cmd.c4
-rw-r--r--pimd/pim_cmd_common.c226
-rw-r--r--pimd/pim_iface.c8
-rw-r--r--pimd/pim_igmp.c11
-rw-r--r--pimd/pim_instance.h2
-rw-r--r--pimd/pim_nb_config.c52
-rw-r--r--pimd/pim_neighbor.c9
-rw-r--r--pimd/pim_neighbor.h2
-rw-r--r--pimd/pim_rp.c61
-rw-r--r--pimd/pim_sock.c2
-rw-r--r--pimd/pim_vty.c26
-rw-r--r--ripd/ripd.c3
-rw-r--r--sharpd/sharp_vty.c14
-rw-r--r--tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py396
-rwxr-xr-xtests/topotests/bgp_distance_change/bgp_admin_dist.json402
-rwxr-xr-xtests/topotests/bgp_distance_change/bgp_admin_dist_vrf.json429
-rwxr-xr-xtests/topotests/bgp_distance_change/test_bgp_admin_dist.py1282
-rwxr-xr-xtests/topotests/bgp_distance_change/test_bgp_admin_dist_vrf.py900
-rw-r--r--tests/topotests/bgp_soo/__init__.py0
-rw-r--r--tests/topotests/bgp_soo/cpe1/bgpd.conf10
-rw-r--r--tests/topotests/bgp_soo/cpe1/zebra.conf12
-rw-r--r--tests/topotests/bgp_soo/cpe2/bgpd.conf10
-rw-r--r--tests/topotests/bgp_soo/cpe2/zebra.conf9
-rw-r--r--tests/topotests/bgp_soo/pe1/bgpd.conf27
-rw-r--r--tests/topotests/bgp_soo/pe1/ldpd.conf10
-rw-r--r--tests/topotests/bgp_soo/pe1/ospfd.conf7
-rw-r--r--tests/topotests/bgp_soo/pe1/zebra.conf12
-rw-r--r--tests/topotests/bgp_soo/pe2/bgpd.conf31
-rw-r--r--tests/topotests/bgp_soo/pe2/ldpd.conf10
-rw-r--r--tests/topotests/bgp_soo/pe2/ospfd.conf7
-rw-r--r--tests/topotests/bgp_soo/pe2/zebra.conf12
-rw-r--r--tests/topotests/bgp_soo/test_bgp_soo.py186
-rwxr-xr-xtests/topotests/conftest.py2
-rw-r--r--tests/topotests/lib/bgp.py31
-rw-r--r--tests/topotests/lib/common_config.py32
-rw-r--r--tests/topotests/lib/topogen.py2
-rwxr-xr-xtools/frrcommon.sh.in12
-rw-r--r--zebra/kernel_socket.c8
-rw-r--r--zebra/redistribute.c13
-rw-r--r--zebra/rib.h22
-rw-r--r--zebra/rt_netlink.c65
-rw-r--r--zebra/tc_netlink.c2
-rw-r--r--zebra/zapi_msg.c34
-rw-r--r--zebra/zebra_dplane.c5
-rw-r--r--zebra/zebra_mpls.c10
-rw-r--r--zebra/zebra_mpls.h17
-rw-r--r--zebra/zebra_rib.c1083
-rw-r--r--zebra/zebra_script.c19
-rw-r--r--zebra/zebra_srv6.c1
-rw-r--r--zebra/zebra_vxlan.c18
-rw-r--r--zebra/zserv.c12
-rw-r--r--zebra/zserv.h10
101 files changed, 5600 insertions, 984 deletions
diff --git a/babeld/kernel.c b/babeld/kernel.c
index 3941db8d5f..5aa01ceb44 100644
--- a/babeld/kernel.c
+++ b/babeld/kernel.c
@@ -227,10 +227,10 @@ if_eui64(int ifindex, unsigned char *eui)
/* Like gettimeofday, but returns monotonic time. If POSIX clocks are not
available, falls back to gettimeofday but enforces monotonicity. */
-int
+void
gettime(struct timeval *tv)
{
- return monotime(tv);
+ monotime(tv);
}
/* If /dev/urandom doesn't exist, this will fail with ENOENT, which the
diff --git a/babeld/kernel.h b/babeld/kernel.h
index 5b1437ef3e..f39bc35bdb 100644
--- a/babeld/kernel.h
+++ b/babeld/kernel.h
@@ -43,7 +43,7 @@ int kernel_route(enum babel_kernel_routes operation, const unsigned char *dest,
unsigned int metric, const unsigned char *newgate,
int newifindex, unsigned int newmetric);
int if_eui64(int ifindex, unsigned char *eui);
-int gettime(struct timeval *tv);
+void gettime(struct timeval *tv);
int read_random_bytes(void *buf, size_t len);
#endif /* BABEL_KERNEL_H */
diff --git a/bgpd/bgp_advertise.c b/bgpd/bgp_advertise.c
index cfbb29df1c..f62a54b03c 100644
--- a/bgpd/bgp_advertise.c
+++ b/bgpd/bgp_advertise.c
@@ -197,7 +197,7 @@ void bgp_adj_in_set(struct bgp_dest *dest, struct peer *peer, struct attr *attr,
adj = XCALLOC(MTYPE_BGP_ADJ_IN, sizeof(struct bgp_adj_in));
adj->peer = peer_lock(peer); /* adj_in peer reference */
adj->attr = bgp_attr_intern(attr);
- adj->uptime = bgp_clock();
+ adj->uptime = monotime(NULL);
adj->addpath_rx_id = addpath_id;
BGP_ADJ_IN_ADD(dest, adj);
bgp_dest_lock_node(dest);
diff --git a/bgpd/bgp_damp.c b/bgpd/bgp_damp.c
index 9acbaf7733..664619078a 100644
--- a/bgpd/bgp_damp.c
+++ b/bgpd/bgp_damp.c
@@ -125,7 +125,7 @@ static void bgp_reuse_timer(struct thread *t)
thread_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
&bdc->t_reuse);
- t_now = bgp_clock();
+ t_now = monotime(NULL);
/* 1. save a pointer to the current zeroth queue head and zero the
list head entry. */
@@ -189,7 +189,7 @@ int bgp_damp_withdraw(struct bgp_path_info *path, struct bgp_dest *dest,
unsigned int last_penalty = 0;
struct bgp_damp_config *bdc = &damp[afi][safi];
- t_now = bgp_clock();
+ t_now = monotime(NULL);
/* Processing Unreachable Messages. */
if (path->extra)
@@ -273,7 +273,7 @@ int bgp_damp_update(struct bgp_path_info *path, struct bgp_dest *dest,
if (!path->extra || !((bdi = path->extra->damp_info)))
return BGP_DAMP_USED;
- t_now = bgp_clock();
+ t_now = monotime(NULL);
bgp_path_info_unset_flag(dest, path, BGP_PATH_HISTORY);
bdi->lastrecord = BGP_RECORD_UPDATE;
@@ -588,7 +588,7 @@ void bgp_damp_info_vty(struct vty *vty, struct bgp_path_info *path, afi_t afi,
return;
/* Calculate new penalty. */
- t_now = bgp_clock();
+ t_now = monotime(NULL);
t_diff = t_now - bdi->t_updated;
penalty = bgp_damp_decay(t_diff, bdi->penalty, bdc);
@@ -642,7 +642,7 @@ const char *bgp_damp_reuse_time_vty(struct vty *vty, struct bgp_path_info *path,
return NULL;
/* Calculate new penalty. */
- t_now = bgp_clock();
+ t_now = monotime(NULL);
t_diff = t_now - bdi->t_updated;
penalty = bgp_damp_decay(t_diff, bdi->penalty, bdc);
diff --git a/bgpd/bgp_dump.c b/bgpd/bgp_dump.c
index 2899b5c8c6..11e84f00b4 100644
--- a/bgpd/bgp_dump.c
+++ b/bgpd/bgp_dump.c
@@ -367,7 +367,7 @@ bgp_dump_route_node_record(int afi, struct bgp_dest *dest,
stream_putw(obuf, path->peer->table_dump_index);
/* Originated */
- stream_putl(obuf, time(NULL) - (bgp_clock() - path->uptime));
+ stream_putl(obuf, time(NULL) - (monotime(NULL) - path->uptime));
/*Path Identifier*/
if (addpath_capable) {
diff --git a/bgpd/bgp_ecommunity.c b/bgpd/bgp_ecommunity.c
index 4120524e63..3f627521e7 100644
--- a/bgpd/bgp_ecommunity.c
+++ b/bgpd/bgp_ecommunity.c
@@ -1188,6 +1188,23 @@ char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter)
return str_buf;
}
+bool ecommunity_include(struct ecommunity *e1, struct ecommunity *e2)
+{
+ uint32_t i, j;
+
+ if (!e1 || !e2)
+ return false;
+ for (i = 0; i < e1->size; ++i) {
+ for (j = 0; j < e2->size; ++j) {
+ if (!memcmp(e1->val + (i * e1->unit_size),
+ e2->val + (j * e2->unit_size),
+ e1->unit_size))
+ return true;
+ }
+ }
+ return false;
+}
+
bool ecommunity_match(const struct ecommunity *ecom1,
const struct ecommunity *ecom2)
{
diff --git a/bgpd/bgp_ecommunity.h b/bgpd/bgp_ecommunity.h
index f22855c329..84e310c3f9 100644
--- a/bgpd/bgp_ecommunity.h
+++ b/bgpd/bgp_ecommunity.h
@@ -257,6 +257,7 @@ extern struct ecommunity *ecommunity_str2com_ipv6(const char *str, int type,
int keyword_included);
extern char *ecommunity_ecom2str(struct ecommunity *, int, int);
extern void ecommunity_strfree(char **s);
+extern bool ecommunity_include(struct ecommunity *e1, struct ecommunity *e2);
extern bool ecommunity_match(const struct ecommunity *,
const struct ecommunity *);
extern char *ecommunity_str(struct ecommunity *);
diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c
index 0642c966eb..dc15d9c695 100644
--- a/bgpd/bgp_evpn.c
+++ b/bgpd/bgp_evpn.c
@@ -191,6 +191,11 @@ static void vrf_import_rt_free(struct vrf_irt_node *irt)
XFREE(MTYPE_BGP_EVPN_VRF_IMPORT_RT, irt);
}
+static void hash_vrf_import_rt_free(struct vrf_irt_node *irt)
+{
+ XFREE(MTYPE_BGP_EVPN_VRF_IMPORT_RT, irt);
+}
+
/*
* Function to lookup Import RT node - used to map a RT to set of
* VNIs importing routes with that RT.
@@ -281,6 +286,11 @@ static void import_rt_free(struct bgp *bgp, struct irt_node *irt)
XFREE(MTYPE_BGP_EVPN_IMPORT_RT, irt);
}
+static void hash_import_rt_free(struct irt_node *irt)
+{
+ XFREE(MTYPE_BGP_EVPN_IMPORT_RT, irt);
+}
+
/*
* Function to lookup Import RT node - used to map a RT to set of
* VNIs importing routes with that RT.
@@ -1263,7 +1273,7 @@ static int update_evpn_type5_route_entry(struct bgp *bgp_evpn,
/* Unintern existing, set to new. */
bgp_attr_unintern(&tmp_pi->attr);
tmp_pi->attr = attr_new;
- tmp_pi->uptime = bgp_clock();
+ tmp_pi->uptime = monotime(NULL);
}
}
return 0;
@@ -1626,7 +1636,7 @@ static int update_evpn_route_entry(struct bgp *bgp, struct bgpevpn *vpn,
/* Unintern existing, set to new. */
bgp_attr_unintern(&tmp_pi->attr);
tmp_pi->attr = attr_new;
- tmp_pi->uptime = bgp_clock();
+ tmp_pi->uptime = monotime(NULL);
}
}
@@ -2520,7 +2530,7 @@ static int install_evpn_route_entry_in_vrf(struct bgp *bgp_vrf,
/* Unintern existing, set to new. */
bgp_attr_unintern(&pi->attr);
pi->attr = attr_new;
- pi->uptime = bgp_clock();
+ pi->uptime = monotime(NULL);
}
/* Gateway IP nexthop should be resolved */
@@ -2643,7 +2653,7 @@ static int install_evpn_route_entry(struct bgp *bgp, struct bgpevpn *vpn,
/* Unintern existing, set to new. */
bgp_attr_unintern(&pi->attr);
pi->attr = attr_new;
- pi->uptime = bgp_clock();
+ pi->uptime = monotime(NULL);
}
/* Add this route to remote IP hashtable */
@@ -5355,6 +5365,11 @@ void bgp_evpn_free(struct bgp *bgp, struct bgpevpn *vpn)
XFREE(MTYPE_BGP_EVPN, vpn);
}
+static void hash_evpn_free(struct bgpevpn *vpn)
+{
+ XFREE(MTYPE_BGP_EVPN, vpn);
+}
+
/*
* Import evpn route from global table to VNI/VRF/ESI.
*/
@@ -5963,12 +5978,16 @@ void bgp_evpn_cleanup(struct bgp *bgp)
(void (*)(struct hash_bucket *, void *))free_vni_entry,
bgp);
+ hash_clean(bgp->import_rt_hash, (void (*)(void *))hash_import_rt_free);
hash_free(bgp->import_rt_hash);
bgp->import_rt_hash = NULL;
+ hash_clean(bgp->vrf_import_rt_hash,
+ (void (*)(void *))hash_vrf_import_rt_free);
hash_free(bgp->vrf_import_rt_hash);
bgp->vrf_import_rt_hash = NULL;
+ hash_clean(bgp->vni_svi_hash, (void (*)(void *))hash_evpn_free);
hash_free(bgp->vni_svi_hash);
bgp->vni_svi_hash = NULL;
hash_free(bgp->vnihash);
@@ -6239,9 +6258,6 @@ static void bgp_evpn_remote_ip_hash_iterate(struct bgpevpn *vpn,
static void show_remote_ip_entry(struct hash_bucket *bucket, void *args)
{
char buf[INET6_ADDRSTRLEN];
- char buf2[EVPN_ROUTE_STRLEN];
- struct prefix_evpn *evp;
-
struct listnode *node = NULL;
struct bgp_path_info *pi = NULL;
struct vty *vty = (struct vty *)args;
@@ -6250,11 +6266,8 @@ static void show_remote_ip_entry(struct hash_bucket *bucket, void *args)
vty_out(vty, " Remote IP: %s\n",
ipaddr2str(&ip->addr, buf, sizeof(buf)));
vty_out(vty, " Linked MAC/IP routes:\n");
- for (ALL_LIST_ELEMENTS_RO(ip->macip_path_list, node, pi)) {
- evp = (struct prefix_evpn *)&pi->net->p;
- prefix2str(evp, buf2, sizeof(buf2));
- vty_out(vty, " %s\n", buf2);
- }
+ for (ALL_LIST_ELEMENTS_RO(ip->macip_path_list, node, pi))
+ vty_out(vty, " %pFX\n", &pi->net->p);
}
void bgp_evpn_show_remote_ip_hash(struct hash_bucket *bucket, void *args)
@@ -6438,14 +6451,11 @@ static void bgp_evpn_remote_ip_process_nexthops(struct bgpevpn *vpn,
if (!bnc->nexthop || bnc->nexthop->ifindex != vpn->svi_ifindex)
return;
- if (BGP_DEBUG(nht, NHT)) {
- char buf[PREFIX2STR_BUFFER];
-
- prefix2str(&bnc->prefix, buf, sizeof(buf));
- zlog_debug("%s(%u): vni %u mac/ip %s for NH %s",
+ if (BGP_DEBUG(nht, NHT))
+ zlog_debug("%s(%u): vni %u mac/ip %s for NH %pFX",
vpn->bgp_vrf->name_pretty, vpn->tenant_vrf_id,
- vpn->vni, (resolve ? "add" : "delete"), buf);
- }
+ vpn->vni, (resolve ? "add" : "delete"),
+ &bnc->prefix);
/*
* MAC/IP route or SVI or tenant vrf being added to EVI.
diff --git a/bgpd/bgp_evpn_mh.c b/bgpd/bgp_evpn_mh.c
index 3f801f7ea0..de63618580 100644
--- a/bgpd/bgp_evpn_mh.c
+++ b/bgpd/bgp_evpn_mh.c
@@ -233,7 +233,7 @@ static int bgp_evpn_es_route_install(struct bgp *bgp,
/* Unintern existing, set to new. */
bgp_attr_unintern(&pi->attr);
pi->attr = attr_new;
- pi->uptime = bgp_clock();
+ pi->uptime = monotime(NULL);
}
/* Perform route selection and update zebra, if required. */
@@ -432,7 +432,7 @@ int bgp_evpn_mh_route_update(struct bgp *bgp, struct bgp_evpn_es *es,
/* Unintern existing, set to new. */
bgp_attr_unintern(&tmp_pi->attr);
tmp_pi->attr = attr_new;
- tmp_pi->uptime = bgp_clock();
+ tmp_pi->uptime = monotime(NULL);
}
}
@@ -4548,6 +4548,11 @@ static void bgp_evpn_nh_del(struct bgp_evpn_nh *n)
XFREE(MTYPE_BGP_EVPN_NH, tmp_n);
}
+static void hash_evpn_nh_free(struct bgp_evpn_nh *ben)
+{
+ XFREE(MTYPE_BGP_EVPN_NH, ben);
+}
+
static unsigned int bgp_evpn_nh_hash_keymake(const void *p)
{
const struct bgp_evpn_nh *n = p;
@@ -4612,6 +4617,7 @@ void bgp_evpn_nh_finish(struct bgp *bgp_vrf)
bgp_vrf->evpn_nh_table,
(void (*)(struct hash_bucket *, void *))bgp_evpn_nh_flush_cb,
NULL);
+ hash_clean(bgp_vrf->evpn_nh_table, (void (*)(void *))hash_evpn_nh_free);
hash_free(bgp_vrf->evpn_nh_table);
bgp_vrf->evpn_nh_table = NULL;
}
diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c
index 6ba516c39c..f6b87dccdb 100644
--- a/bgpd/bgp_evpn_vty.c
+++ b/bgpd/bgp_evpn_vty.c
@@ -614,14 +614,10 @@ static void show_esi_routes(struct bgp *bgp,
for (dest = bgp_table_top(es->route_table); dest;
dest = bgp_route_next(dest)) {
int add_prefix_to_json = 0;
- char prefix_str[BUFSIZ];
json_object *json_paths = NULL;
json_object *json_prefix = NULL;
const struct prefix *p = bgp_dest_get_prefix(dest);
- prefix2str((struct prefix_evpn *)p, prefix_str,
- sizeof(prefix_str));
-
if (json)
json_prefix = json_object_new_object();
@@ -661,14 +657,14 @@ static void show_esi_routes(struct bgp *bgp,
if (json) {
if (add_prefix_to_json) {
- json_object_string_add(json_prefix, "prefix",
- prefix_str);
+ json_object_string_addf(json_prefix, "prefix",
+ "%pFX", p);
json_object_int_add(json_prefix, "prefixLen",
p->prefixlen);
json_object_object_add(json_prefix, "paths",
json_paths);
- json_object_object_add(json, prefix_str,
- json_prefix);
+ json_object_object_addf(json, json_prefix,
+ "%pFX", p);
} else {
json_object_free(json_paths);
json_object_free(json_prefix);
@@ -800,14 +796,10 @@ static void show_vni_routes(struct bgp *bgp, struct bgpevpn *vpn, int type,
const struct prefix_evpn *evp =
(const struct prefix_evpn *)bgp_dest_get_prefix(dest);
int add_prefix_to_json = 0;
- char prefix_str[BUFSIZ];
json_object *json_paths = NULL;
json_object *json_prefix = NULL;
const struct prefix *p = bgp_dest_get_prefix(dest);
- prefix2str((struct prefix_evpn *)bgp_dest_get_prefix(dest),
- prefix_str, sizeof(prefix_str));
-
if (type && evp->prefix.route_type != type)
continue;
@@ -861,14 +853,14 @@ static void show_vni_routes(struct bgp *bgp, struct bgpevpn *vpn, int type,
if (json) {
if (add_prefix_to_json) {
- json_object_string_add(json_prefix, "prefix",
- prefix_str);
+ json_object_string_addf(json_prefix, "prefix",
+ "%pFX", p);
json_object_int_add(json_prefix, "prefixLen",
p->prefixlen);
json_object_object_add(json_prefix, "paths",
json_paths);
- json_object_object_add(json, prefix_str,
- json_prefix);
+ json_object_object_addf(json, json_prefix,
+ "%pFX", p);
} else {
json_object_free(json_paths);
json_object_free(json_prefix);
@@ -1020,13 +1012,12 @@ static void show_l3vni_entry(struct vty *vty, struct bgp *bgp,
/* If there are multiple export RTs we break here and show only
* one */
- if (!json)
+ if (!json) {
+ vty_out(vty, "%-37s", vrf_id_to_name(bgp->vrf_id));
break;
+ }
}
- if (!json)
- vty_out(vty, "%-37s", vrf_id_to_name(bgp->vrf_id));
-
if (json) {
char vni_str[VNI_STR_LEN];
@@ -1159,13 +1150,13 @@ static void show_vni_entry(struct hash_bucket *bucket, void *args[])
/* If there are multiple export RTs we break here and show only
* one */
- if (!json)
+ if (!json) {
+ vty_out(vty, "%-37s",
+ vrf_id_to_name(vpn->tenant_vrf_id));
break;
+ }
}
- if (!json)
- vty_out(vty, "%-37s", vrf_id_to_name(vpn->tenant_vrf_id));
-
if (json) {
char vni_str[VNI_STR_LEN];
@@ -1190,7 +1181,6 @@ static int bgp_show_ethernet_vpn(struct vty *vty, struct prefix_rd *prd,
int rd_header;
int header = 1;
char rd_str[RD_ADDRSTRLEN];
- char buf[BUFSIZ];
int no_display;
unsigned long output_count = 0;
@@ -1353,20 +1343,17 @@ static int bgp_show_ethernet_vpn(struct vty *vty, struct prefix_rd *prd,
json_prefix_info = json_object_new_object();
- prefix2str((struct prefix_evpn *)p, buf,
- BUFSIZ);
-
- json_object_string_addf(
- json_prefix_info, "prefix", "%pFX",
- (struct prefix_evpn *)p);
+ json_object_string_addf(json_prefix_info,
+ "prefix", "%pFX", p);
json_object_int_add(json_prefix_info,
"prefixLen", p->prefixlen);
json_object_object_add(json_prefix_info,
"paths", json_array);
- json_object_object_add(json_nroute, buf,
- json_prefix_info);
+ json_object_object_addf(json_nroute,
+ json_prefix_info,
+ "%pFX", p);
json_array = NULL;
}
}
@@ -2574,7 +2561,6 @@ static void evpn_show_route_rd_macip(struct vty *vty, struct bgp *bgp,
safi_t safi;
uint32_t path_cnt = 0;
json_object *json_paths = NULL;
- char prefix_str[BUFSIZ];
afi = AFI_L2VPN;
safi = SAFI_EVPN;
@@ -2593,8 +2579,6 @@ static void evpn_show_route_rd_macip(struct vty *vty, struct bgp *bgp,
return;
}
- prefix2str(&p, prefix_str, sizeof(prefix_str));
-
/* Prefix and num paths displayed once per prefix. */
route_vty_out_detail_header(vty, bgp, dest, prd, afi, safi, json);
@@ -2619,7 +2603,7 @@ static void evpn_show_route_rd_macip(struct vty *vty, struct bgp *bgp,
if (json && path_cnt) {
if (path_cnt)
- json_object_object_add(json, prefix_str, json_paths);
+ json_object_object_addf(json, json_paths, "%pFX", &p);
json_object_int_add(json, "numPaths", path_cnt);
} else {
vty_out(vty, "\nDisplayed %u paths for requested prefix\n",
@@ -2678,12 +2662,8 @@ static void evpn_show_route_rd(struct vty *vty, struct bgp *bgp,
(const struct prefix_evpn *)bgp_dest_get_prefix(dest);
json_object *json_prefix = NULL;
json_object *json_paths = NULL;
- char prefix_str[BUFSIZ];
int add_prefix_to_json = 0;
- prefix2str((struct prefix_evpn *)evp, prefix_str,
- sizeof(prefix_str));
-
if (type && evp->prefix.route_type != type)
continue;
@@ -2739,8 +2719,8 @@ static void evpn_show_route_rd(struct vty *vty, struct bgp *bgp,
if (add_prefix_to_json) {
json_object_object_add(json_prefix, "paths",
json_paths);
- json_object_object_add(json_rd, prefix_str,
- json_prefix);
+ json_object_object_addf(json_rd, json_prefix,
+ "%pFX", evp);
} else {
json_object_free(json_paths);
json_object_free(json_prefix);
@@ -2798,7 +2778,6 @@ static void evpn_show_route_rd_all_macip(struct vty *vty, struct bgp *bgp,
json_object *json_prefix = NULL; /* prefix within an RD */
json_object *json_rd = NULL; /* holds all prefixes for RD */
char rd_str[RD_ADDRSTRLEN];
- char prefix_str[BUFSIZ];
int add_rd_to_json = 0;
struct prefix_evpn ep;
const struct prefix *rd_destp = bgp_dest_get_prefix(rd_dest);
@@ -2825,8 +2804,6 @@ static void evpn_show_route_rd_all_macip(struct vty *vty, struct bgp *bgp,
const struct prefix *p = bgp_dest_get_prefix(dest);
- prefix2str(p, prefix_str, sizeof(prefix_str));
-
pi = bgp_dest_get_bgp_path_info(dest);
if (pi) {
/* RD header - per RD. */
@@ -2838,8 +2815,8 @@ static void evpn_show_route_rd_all_macip(struct vty *vty, struct bgp *bgp,
if (json) {
json_prefix = json_object_new_object();
json_paths = json_object_new_array();
- json_object_string_add(json_prefix, "prefix",
- prefix_str);
+ json_object_string_addf(json_prefix, "prefix", "%pFX",
+ p);
json_object_int_add(json_prefix, "prefixLen",
p->prefixlen);
} else
@@ -2873,8 +2850,8 @@ static void evpn_show_route_rd_all_macip(struct vty *vty, struct bgp *bgp,
if (json) {
json_object_object_add(json_prefix, "paths",
json_paths);
- json_object_object_add(json_rd, prefix_str,
- json_prefix);
+ json_object_object_addf(json_rd, json_prefix, "%pFX",
+ p);
if (add_rd_to_json)
json_object_object_add(json, rd_str, json_rd);
else {
@@ -2954,13 +2931,9 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type,
const struct prefix_evpn *evp =
(const struct prefix_evpn *)bgp_dest_get_prefix(
dest);
- char prefix_str[BUFSIZ];
int add_prefix_to_json = 0;
const struct prefix *p = bgp_dest_get_prefix(dest);
- prefix2str((struct prefix_evpn *)p, prefix_str,
- sizeof(prefix_str));
-
if (type && evp->prefix.route_type != type)
continue;
@@ -2992,8 +2965,8 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type,
if (json) {
json_prefix = json_object_new_object();
json_paths = json_object_new_array();
- json_object_string_add(json_prefix, "prefix",
- prefix_str);
+ json_object_string_addf(json_prefix, "prefix",
+ "%pFX", p);
json_object_int_add(json_prefix, "prefixLen",
p->prefixlen);
}
@@ -3038,9 +3011,9 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type,
json_object_object_add(json_prefix,
"paths",
json_paths);
- json_object_object_add(json_rd,
- prefix_str,
- json_prefix);
+ json_object_object_addf(json_rd,
+ json_prefix,
+ "%pFX", p);
} else {
json_object_free(json_prefix);
json_object_free(json_paths);
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index 5534e54808..1164546df7 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -300,6 +300,11 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
peer->afc_recv[afi][safi] = from_peer->afc_recv[afi][safi];
peer->orf_plist[afi][safi] = from_peer->orf_plist[afi][safi];
peer->llgr[afi][safi] = from_peer->llgr[afi][safi];
+ if (from_peer->soo[afi][safi]) {
+ ecommunity_free(&peer->soo[afi][safi]);
+ peer->soo[afi][safi] =
+ ecommunity_dup(from_peer->soo[afi][safi]);
+ }
}
if (bgp_getsockname(peer) < 0) {
@@ -574,7 +579,7 @@ void bgp_routeadv_timer(struct thread *thread)
zlog_debug("%s [FSM] Timer (routeadv timer expire)",
peer->host);
- peer->synctime = bgp_clock();
+ peer->synctime = monotime(NULL);
thread_add_timer_msec(bm->master, bgp_generate_updgrp_packets, peer, 0,
&peer->t_generate_updgrp_packets);
@@ -975,7 +980,7 @@ void bgp_start_routeadv(struct bgp *bgp)
*/
void bgp_adjust_routeadv(struct peer *peer)
{
- time_t nowtime = bgp_clock();
+ time_t nowtime = monotime(NULL);
double diff;
unsigned long remain;
@@ -987,7 +992,7 @@ void bgp_adjust_routeadv(struct peer *peer)
*/
THREAD_OFF(peer->t_routeadv);
- peer->synctime = bgp_clock();
+ peer->synctime = monotime(NULL);
/* If suppress fib pending is enabled, route is advertised to
* peers when the status is received from the FIB. The delay
* is added to update group packet generate which will allow
@@ -1471,7 +1476,7 @@ int bgp_stop(struct peer *peer)
}
/* set last reset time */
- peer->resettime = peer->uptime = bgp_clock();
+ peer->resettime = peer->uptime = monotime(NULL);
if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
zlog_debug("%s remove from all update group",
@@ -2219,7 +2224,7 @@ static int bgp_establish(struct peer *peer)
if (!peer->v_holdtime)
bgp_keepalives_on(peer);
- peer->uptime = bgp_clock();
+ peer->uptime = monotime(NULL);
/* Send route-refresh when ORF is enabled.
* Stop Long-lived Graceful Restart timers.
diff --git a/bgpd/bgp_io.c b/bgpd/bgp_io.c
index aba28fa504..7af1fae280 100644
--- a/bgpd/bgp_io.c
+++ b/bgpd/bgp_io.c
@@ -431,7 +431,7 @@ static uint16_t bgp_write(struct peer *peer)
}
done : {
- now = bgp_clock();
+ now = monotime(NULL);
/*
* Update last_update if UPDATEs were written.
* Note: that these are only updated at end,
diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c
index 8772afd736..fa1dcf33e0 100644
--- a/bgpd/bgp_labelpool.c
+++ b/bgpd/bgp_labelpool.c
@@ -719,16 +719,14 @@ DEFUN(show_bgp_labelpool_ledger, show_bgp_labelpool_ledger_cmd,
vty_out(vty, "%-18s %u\n",
"INVALID", lcb->label);
else {
- char buf[PREFIX2STR_BUFFER];
p = bgp_dest_get_prefix(dest);
- prefix2str(p, buf, sizeof(buf));
if (uj) {
- json_object_string_add(json_elem,
- "prefix", buf);
+ json_object_string_addf(
+ json_elem, "prefix", "%pFX", p);
json_object_int_add(json_elem, "label",
lcb->label);
} else
- vty_out(vty, "%-18s %u\n", buf,
+ vty_out(vty, "%-18pFX %u\n", p,
lcb->label);
}
break;
@@ -812,16 +810,14 @@ DEFUN(show_bgp_labelpool_inuse, show_bgp_labelpool_inuse_cmd,
vty_out(vty, "INVALID %u\n",
label);
else {
- char buf[PREFIX2STR_BUFFER];
p = bgp_dest_get_prefix(dest);
- prefix2str(p, buf, sizeof(buf));
if (uj) {
- json_object_string_add(json_elem,
- "prefix", buf);
+ json_object_string_addf(
+ json_elem, "prefix", "%pFX", p);
json_object_int_add(json_elem, "label",
label);
} else
- vty_out(vty, "%-18s %u\n", buf,
+ vty_out(vty, "%-18pFX %u\n", p,
label);
}
break;
@@ -851,7 +847,6 @@ DEFUN(show_bgp_labelpool_requests, show_bgp_labelpool_requests_cmd,
json_object *json = NULL, *json_elem = NULL;
struct bgp_dest *dest;
const struct prefix *p;
- char buf[PREFIX2STR_BUFFER];
struct lp_fifo *item, *next;
int count;
@@ -893,12 +888,11 @@ DEFUN(show_bgp_labelpool_requests, show_bgp_labelpool_requests_cmd,
vty_out(vty, "INVALID\n");
} else {
p = bgp_dest_get_prefix(dest);
- prefix2str(p, buf, sizeof(buf));
if (uj)
- json_object_string_add(json_elem,
- "prefix", buf);
+ json_object_string_addf(
+ json_elem, "prefix", "%pFX", p);
else
- vty_out(vty, "%-18s\n", buf);
+ vty_out(vty, "%-18pFX\n", p);
}
break;
case LP_TYPE_VRF:
diff --git a/bgpd/bgp_memory.c b/bgpd/bgp_memory.c
index b9f1ba3971..850657d35e 100644
--- a/bgpd/bgp_memory.c
+++ b/bgpd/bgp_memory.c
@@ -131,14 +131,6 @@ DEFINE_MTYPE(BGPD, BGP_EVPN_ES_EVI, "BGP EVPN ES-per-EVI Information");
DEFINE_MTYPE(BGPD, BGP_EVPN_ES_VRF, "BGP EVPN ES-per-VRF Information");
DEFINE_MTYPE(BGPD, BGP_EVPN_IMPORT_RT, "BGP EVPN Import RT");
DEFINE_MTYPE(BGPD, BGP_EVPN_VRF_IMPORT_RT, "BGP EVPN VRF Import RT");
-DEFINE_MTYPE(BGPD, BGP_EVPN_MACIP, "BGP EVPN MAC IP");
-
-DEFINE_MTYPE(BGPD, BGP_FLOWSPEC, "BGP flowspec");
-DEFINE_MTYPE(BGPD, BGP_FLOWSPEC_RULE, "BGP flowspec rule");
-DEFINE_MTYPE(BGPD, BGP_FLOWSPEC_RULE_STR, "BGP flowspec rule str");
-DEFINE_MTYPE(BGPD, BGP_FLOWSPEC_COMPILED, "BGP flowspec compiled");
-DEFINE_MTYPE(BGPD, BGP_FLOWSPEC_NAME, "BGP flowspec name");
-DEFINE_MTYPE(BGPD, BGP_FLOWSPEC_INDEX, "BGP flowspec index");
DEFINE_MTYPE(BGPD, BGP_SRV6_L3VPN, "BGP prefix-sid srv6 l3vpn servcie");
DEFINE_MTYPE(BGPD, BGP_SRV6_VPN, "BGP prefix-sid srv6 vpn service");
diff --git a/bgpd/bgp_memory.h b/bgpd/bgp_memory.h
index d4d7b0cf88..510cfa21c9 100644
--- a/bgpd/bgp_memory.h
+++ b/bgpd/bgp_memory.h
@@ -128,14 +128,6 @@ DECLARE_MTYPE(BGP_EVPN_ES_EVI_VTEP);
DECLARE_MTYPE(BGP_EVPN);
DECLARE_MTYPE(BGP_EVPN_IMPORT_RT);
DECLARE_MTYPE(BGP_EVPN_VRF_IMPORT_RT);
-DECLARE_MTYPE(BGP_EVPN_MACIP);
-
-DECLARE_MTYPE(BGP_FLOWSPEC);
-DECLARE_MTYPE(BGP_FLOWSPEC_RULE);
-DECLARE_MTYPE(BGP_FLOWSPEC_RULE_STR);
-DECLARE_MTYPE(BGP_FLOWSPEC_COMPILED);
-DECLARE_MTYPE(BGP_FLOWSPEC_NAME);
-DECLARE_MTYPE(BGP_FLOWSPEC_INDEX);
DECLARE_MTYPE(BGP_SRV6_L3VPN);
DECLARE_MTYPE(BGP_SRV6_VPN);
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index 7b8f0df2e2..2f1cc4dc82 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -662,25 +662,6 @@ void transpose_sid(struct in6_addr *sid, uint32_t label, uint8_t offset,
}
}
-static bool ecom_intersect(struct ecommunity *e1, struct ecommunity *e2)
-{
- uint32_t i, j;
-
- if (!e1 || !e2)
- return false;
- for (i = 0; i < e1->size; ++i) {
- for (j = 0; j < e2->size; ++j) {
- if (!memcmp(e1->val + (i * e1->unit_size),
- e2->val + (j * e2->unit_size),
- e1->unit_size)) {
-
- return true;
- }
- }
- }
- return false;
-}
-
static bool labels_same(struct bgp_path_info *bpi, mpls_label_t *label,
uint32_t n)
{
@@ -925,7 +906,7 @@ leak_update(struct bgp *to_bgp, struct bgp_dest *bn,
bgp_aggregate_decrement(to_bgp, p, bpi, afi, safi);
bgp_attr_unintern(&bpi->attr);
bpi->attr = new_attr;
- bpi->uptime = bgp_clock();
+ bpi->uptime = monotime(NULL);
/*
* rewrite labels
@@ -1502,7 +1483,7 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
}
/* Check for intersection of route targets */
- if (!ecom_intersect(
+ if (!ecommunity_include(
to_bgp->vpn_policy[afi].rtlist[BGP_VPN_POLICY_DIR_FROMVPN],
bgp_attr_get_ecommunity(path_vpn->attr))) {
if (debug)
@@ -1751,9 +1732,10 @@ void vpn_leak_to_vrf_withdraw(struct bgp *from_bgp, /* from */
}
/* Check for intersection of route targets */
- if (!ecom_intersect(bgp->vpn_policy[afi]
- .rtlist[BGP_VPN_POLICY_DIR_FROMVPN],
- bgp_attr_get_ecommunity(path_vpn->attr))) {
+ if (!ecommunity_include(
+ bgp->vpn_policy[afi]
+ .rtlist[BGP_VPN_POLICY_DIR_FROMVPN],
+ bgp_attr_get_ecommunity(path_vpn->attr))) {
continue;
}
@@ -2932,7 +2914,7 @@ vrf_id_t get_first_vrf_for_redirect_with_rt(struct ecommunity *eckey)
if (ec && eckey->unit_size != ec->unit_size)
continue;
- if (ecom_intersect(ec, eckey))
+ if (ecommunity_include(ec, eckey))
return bgp->vrf_id;
}
return VRF_UNKNOWN;
diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c
index e1fcc743ec..971b1817c8 100644
--- a/bgpd/bgp_nexthop.c
+++ b/bgpd/bgp_nexthop.c
@@ -64,11 +64,6 @@ int bgp_nexthop_cache_compare(const struct bgp_nexthop_cache *a,
return prefix_cmp(&a->prefix, &b->prefix);
}
-const char *bnc_str(struct bgp_nexthop_cache *bnc, char *buf, int size)
-{
- return prefix2str(&bnc->prefix, buf, size);
-}
-
void bnc_nexthop_free(struct bgp_nexthop_cache *bnc)
{
nexthops_free(bnc->nexthop);
@@ -868,7 +863,7 @@ static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp,
if (!CHECK_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED))
vty_out(vty, " Is not Registered\n");
}
- tbuf = time(NULL) - (bgp_clock() - bnc->last_update);
+ tbuf = time(NULL) - (monotime(NULL) - bnc->last_update);
vty_out(vty, " Last update: %s", ctime(&tbuf));
vty_out(vty, "\n");
diff --git a/bgpd/bgp_nexthop.h b/bgpd/bgp_nexthop.h
index 9d653ef4dc..efad906d0a 100644
--- a/bgpd/bgp_nexthop.h
+++ b/bgpd/bgp_nexthop.h
@@ -161,7 +161,6 @@ extern struct bgp_nexthop_cache *bnc_find(struct bgp_nexthop_cache_head *tree,
uint32_t srte_color,
ifindex_t ifindex);
extern void bnc_nexthop_free(struct bgp_nexthop_cache *bnc);
-extern const char *bnc_str(struct bgp_nexthop_cache *bnc, char *buf, int size);
extern void bgp_scan_init(struct bgp *bgp);
extern void bgp_scan_finish(struct bgp *bgp);
extern void bgp_scan_vty_init(void);
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index 344608fda1..61f1b295ca 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -82,13 +82,10 @@ static int bgp_isvalid_labeled_nexthop(struct bgp_nexthop_cache *bnc)
static void bgp_unlink_nexthop_check(struct bgp_nexthop_cache *bnc)
{
if (LIST_EMPTY(&(bnc->paths)) && !bnc->nht_info) {
- if (BGP_DEBUG(nht, NHT)) {
- char buf[PREFIX2STR_BUFFER];
- zlog_debug("%s: freeing bnc %s(%d)(%u)(%s)", __func__,
- bnc_str(bnc, buf, PREFIX2STR_BUFFER),
- bnc->ifindex, bnc->srte_color,
+ if (BGP_DEBUG(nht, NHT))
+ zlog_debug("%s: freeing bnc %pFX(%d)(%u)(%s)", __func__,
+ &bnc->prefix, bnc->ifindex, bnc->srte_color,
bnc->bgp->name_pretty);
- }
/* only unregister if this is the last nh for this prefix*/
if (!bnc_existing_for_prefix(bnc))
unregister_zebra_rnh(bnc);
@@ -261,24 +258,17 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
if (!bnc) {
bnc = bnc_new(tree, &p, srte_color, ifindex);
bnc->bgp = bgp_nexthop;
- if (BGP_DEBUG(nht, NHT)) {
- char buf[PREFIX2STR_BUFFER];
-
- zlog_debug("Allocated bnc %s(%d)(%u)(%s) peer %p",
- bnc_str(bnc, buf, PREFIX2STR_BUFFER),
- bnc->ifindex, bnc->srte_color,
+ if (BGP_DEBUG(nht, NHT))
+ zlog_debug("Allocated bnc %pFX(%d)(%u)(%s) peer %p",
+ &bnc->prefix, bnc->ifindex, bnc->srte_color,
bnc->bgp->name_pretty, peer);
- }
} else {
- if (BGP_DEBUG(nht, NHT)) {
- char buf[PREFIX2STR_BUFFER];
-
+ if (BGP_DEBUG(nht, NHT))
zlog_debug(
- "Found existing bnc %s(%d)(%s) flags 0x%x ifindex %d #paths %d peer %p",
- bnc_str(bnc, buf, PREFIX2STR_BUFFER),
- bnc->ifindex, bnc->bgp->name_pretty, bnc->flags,
- bnc->ifindex, bnc->path_count, bnc->nht_info);
- }
+ "Found existing bnc %pFX(%d)(%s) flags 0x%x ifindex %d #paths %d peer %p",
+ &bnc->prefix, bnc->ifindex,
+ bnc->bgp->name_pretty, bnc->flags, bnc->ifindex,
+ bnc->path_count, bnc->nht_info);
}
if (pi && is_route_parent_evpn(pi))
@@ -436,7 +426,7 @@ static void bgp_process_nexthop_update(struct bgp_nexthop_cache *bnc,
int i;
bool evpn_resolved = false;
- bnc->last_update = bgp_clock();
+ bnc->last_update = monotime(NULL);
bnc->change_flags = 0;
/* debug print the input */
@@ -563,16 +553,12 @@ static void bgp_process_nexthop_update(struct bgp_nexthop_cache *bnc,
if (bnc->is_evpn_gwip_nexthop) {
evpn_resolved = bgp_evpn_is_gateway_ip_resolved(bnc);
- if (BGP_DEBUG(nht, NHT)) {
- char buf2[PREFIX2STR_BUFFER];
-
- prefix2str(&bnc->prefix, buf2, sizeof(buf2));
+ if (BGP_DEBUG(nht, NHT))
zlog_debug(
- "EVPN gateway IP %s recursive MAC/IP lookup %s",
- buf2,
+ "EVPN gateway IP %pFX recursive MAC/IP lookup %s",
+ &bnc->prefix,
(evpn_resolved ? "successful"
: "failed"));
- }
if (evpn_resolved) {
bnc->flags |= BGP_NEXTHOP_VALID;
@@ -609,7 +595,7 @@ static void bgp_nht_ifp_table_handle(struct bgp *bgp,
if (bnc->ifindex != ifp->ifindex)
continue;
- bnc->last_update = bgp_clock();
+ bnc->last_update = monotime(NULL);
bnc->change_flags = 0;
/*
@@ -1023,14 +1009,12 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
const struct prefix *p;
if (BGP_DEBUG(nht, NHT)) {
- char buf[PREFIX2STR_BUFFER];
char bnc_buf[BNC_FLAG_DUMP_SIZE];
char chg_buf[BNC_FLAG_DUMP_SIZE];
- bnc_str(bnc, buf, PREFIX2STR_BUFFER);
zlog_debug(
- "NH update for %s(%d)(%u)(%s) - flags %s chgflags %s- evaluate paths",
- buf, bnc->ifindex, bnc->srte_color,
+ "NH update for %pFX(%d)(%u)(%s) - flags %s chgflags %s- evaluate paths",
+ &bnc->prefix, bnc->ifindex, bnc->srte_color,
bnc->bgp->name_pretty,
bgp_nexthop_dump_bnc_flags(bnc, bnc_buf,
sizeof(bnc_buf)),
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index fe1887565e..7daac44946 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -131,11 +131,11 @@ static void bgp_packet_add(struct peer *peer, struct stream *s)
* after it'll get confused
*/
if (!stream_fifo_count_safe(peer->obuf))
- peer->last_sendq_ok = bgp_clock();
+ peer->last_sendq_ok = monotime(NULL);
stream_fifo_push(peer->obuf, s);
- delta = bgp_clock() - peer->last_sendq_ok;
+ delta = monotime(NULL) - peer->last_sendq_ok;
holdtime = atomic_load_explicit(&peer->holdtime,
memory_order_relaxed);
@@ -156,12 +156,12 @@ static void bgp_packet_add(struct peer *peer, struct stream *s)
peer->host);
BGP_EVENT_ADD(peer, TCP_fatal_error);
} else if (delta > (intmax_t)holdtime &&
- bgp_clock() - peer->last_sendq_warn > 5) {
+ monotime(NULL) - peer->last_sendq_warn > 5) {
flog_warn(
EC_BGP_SENDQ_STUCK_WARN,
"%s has not made any SendQ progress for 1 holdtime, peer overloaded?",
peer->host);
- peer->last_sendq_warn = bgp_clock();
+ peer->last_sendq_warn = monotime(NULL);
}
}
}
@@ -2026,7 +2026,7 @@ static int bgp_update_receive(struct peer *peer, bgp_size_t size)
interned in bgp_attr_parse(). */
bgp_attr_unintern_sub(&attr);
- peer->update_time = bgp_clock();
+ peer->update_time = monotime(NULL);
/* Notify BGP Conditional advertisement scanner process */
peer->advmap_table_change = true;
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index 04f955f97a..f042d0bd95 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -2025,8 +2025,8 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
&& (IPV4_ADDR_SAME(&onlypeer->remote_id, &piattr->originator_id))) {
if (bgp_debug_update(NULL, p, subgrp->update_group, 0))
zlog_debug(
- "%s [Update:SEND] %pFX originator-id is same as remote router-id",
- onlypeer->host, p);
+ "%pBP [Update:SEND] %pFX originator-id is same as remote router-id",
+ onlypeer, p);
return false;
}
@@ -2041,8 +2041,8 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
if (bgp_debug_update(NULL, p,
subgrp->update_group, 0))
zlog_debug(
- "%s [Update:SEND] %pFX is filtered via ORF",
- peer->host, p);
+ "%pBP [Update:SEND] %pFX is filtered via ORF",
+ peer, p);
return false;
}
}
@@ -2050,8 +2050,8 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
/* Output filter check. */
if (bgp_output_filter(peer, p, piattr, afi, safi) == FILTER_DENY) {
if (bgp_debug_update(NULL, p, subgrp->update_group, 0))
- zlog_debug("%s [Update:SEND] %pFX is filtered",
- peer->host, p);
+ zlog_debug("%pBP [Update:SEND] %pFX is filtered", peer,
+ p);
return false;
}
@@ -2060,8 +2060,8 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
&& aspath_loop_check(piattr->aspath, onlypeer->as)) {
if (bgp_debug_update(NULL, p, subgrp->update_group, 0))
zlog_debug(
- "%s [Update:SEND] suppress announcement to peer AS %u that is part of AS path.",
- onlypeer->host, onlypeer->as);
+ "%pBP [Update:SEND] suppress announcement to peer AS %u that is part of AS path.",
+ onlypeer, onlypeer->as);
return false;
}
@@ -2070,8 +2070,8 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
if (aspath_loop_check(piattr->aspath, bgp->confed_id)) {
if (bgp_debug_update(NULL, p, subgrp->update_group, 0))
zlog_debug(
- "%s [Update:SEND] suppress announcement to peer AS %u is AS path.",
- peer->host, bgp->confed_id);
+ "%pBP [Update:SEND] suppress announcement to peer AS %u is AS path.",
+ peer, bgp->confed_id);
return false;
}
}
@@ -2278,9 +2278,8 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
if (ret == RMAP_DENYMATCH) {
if (bgp_debug_update(NULL, p, subgrp->update_group, 0))
zlog_debug(
- "%s [Update:SEND] %pFX is filtered by route-map '%s'",
- peer->host, p,
- ROUTE_MAP_OUT_NAME(filter));
+ "%pBP [Update:SEND] %pFX is filtered by route-map '%s'",
+ peer, p, ROUTE_MAP_OUT_NAME(filter));
bgp_attr_flush(rmap_path.attr);
return false;
}
@@ -2317,6 +2316,29 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
if (aspath_check_as_sets(attr->aspath))
return false;
+ /* If neighbor sso is configured, then check if the route has
+ * SoO extended community and validate against the configured
+ * one. If they match, do not announce, to prevent routing
+ * loops.
+ */
+ if ((attr->flag & ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES)) &&
+ peer->soo[afi][safi]) {
+ struct ecommunity *ecomm_soo = peer->soo[afi][safi];
+ struct ecommunity *ecomm = bgp_attr_get_ecommunity(attr);
+
+ if ((ecommunity_lookup(ecomm, ECOMMUNITY_ENCODE_AS,
+ ECOMMUNITY_SITE_ORIGIN) ||
+ ecommunity_lookup(ecomm, ECOMMUNITY_ENCODE_AS4,
+ ECOMMUNITY_SITE_ORIGIN)) &&
+ ecommunity_include(ecomm, ecomm_soo)) {
+ if (bgp_debug_update(NULL, p, subgrp->update_group, 0))
+ zlog_debug(
+ "%pBP [Update:SEND] %pFX is filtered by SoO extcommunity '%s'",
+ peer, p, ecommunity_str(ecomm_soo));
+ return false;
+ }
+ }
+
/* Codification of AS 0 Processing */
if (aspath_check_as_zero(attr->aspath))
return false;
@@ -3672,7 +3694,7 @@ struct bgp_path_info *info_make(int type, int sub_type, unsigned short instance,
new->sub_type = sub_type;
new->peer = peer;
new->attr = attr;
- new->uptime = bgp_clock();
+ new->uptime = monotime(NULL);
new->net = dest;
return new;
}
@@ -4058,11 +4080,35 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
return -1;
}
+ /* If neighbor soo is configured, tag all incoming routes with
+ * this SoO tag and then filter out advertisements in
+ * subgroup_announce_check() if it matches the configured SoO
+ * on the other peer.
+ */
+ if (peer->soo[afi][safi]) {
+ struct ecommunity *old_ecomm =
+ bgp_attr_get_ecommunity(&new_attr);
+ struct ecommunity *ecomm_soo = peer->soo[afi][safi];
+ struct ecommunity *new_ecomm;
+
+ if (old_ecomm) {
+ new_ecomm = ecommunity_merge(ecommunity_dup(old_ecomm),
+ ecomm_soo);
+
+ if (!old_ecomm->refcnt)
+ ecommunity_free(&old_ecomm);
+ } else {
+ new_ecomm = ecommunity_dup(ecomm_soo);
+ }
+
+ bgp_attr_set_ecommunity(&new_attr, new_ecomm);
+ }
+
attr_new = bgp_attr_intern(&new_attr);
/* If the update is implicit withdraw. */
if (pi) {
- pi->uptime = bgp_clock();
+ pi->uptime = monotime(NULL);
same_attr = attrhash_cmp(pi->attr, attr_new);
hook_call(bgp_process, bgp, afi, safi, dest, peer, true);
@@ -5995,7 +6041,7 @@ void bgp_static_update(struct bgp *bgp, const struct prefix *p,
#endif
bgp_attr_unintern(&pi->attr);
pi->attr = attr_new;
- pi->uptime = bgp_clock();
+ pi->uptime = monotime(NULL);
#ifdef ENABLE_BGP_VNC
if ((afi == AFI_IP || afi == AFI_IP6)
&& (safi == SAFI_UNICAST)) {
@@ -6297,7 +6343,7 @@ static void bgp_static_update_safi(struct bgp *bgp, const struct prefix *p,
bgp_aggregate_decrement(bgp, p, pi, afi, safi);
bgp_attr_unintern(&pi->attr);
pi->attr = attr_new;
- pi->uptime = bgp_clock();
+ pi->uptime = monotime(NULL);
#ifdef ENABLE_BGP_VNC
if (pi->extra)
label = decode_label(&pi->extra->label[0]);
@@ -8521,7 +8567,7 @@ void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
bgp, p, bpi, afi, SAFI_UNICAST);
bgp_attr_unintern(&bpi->attr);
bpi->attr = new_attr;
- bpi->uptime = bgp_clock();
+ bpi->uptime = monotime(NULL);
/* Process change. */
bgp_aggregate_increment(bgp, p, bpi, afi,
@@ -9456,9 +9502,7 @@ void route_vty_out_tmp(struct vty *vty, struct bgp_dest *dest,
json_object_boolean_true_add(json_status, ">");
json_object_object_add(json_net, "appliedStatusSymbols",
json_status);
-
- prefix2str(p, buff, PREFIX_STRLEN);
- json_object_object_add(json_ar, buff, json_net);
+ json_object_object_addf(json_ar, json_net, "%pFX", p);
} else
vty_out(vty, "\n");
}
@@ -10052,7 +10096,7 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
vty_out(vty, " Gateway IP %s", gwip_buf);
}
- if (safi == SAFI_EVPN)
+ if (safi == SAFI_EVPN && !json_path)
vty_out(vty, "\n");
/* Line1 display AS-path, Aggregator */
@@ -10808,7 +10852,7 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
}
/* Line 9 display Uptime */
- tbuf = time(NULL) - (bgp_clock() - path->uptime);
+ tbuf = time(NULL) - (monotime(NULL) - path->uptime);
if (json_paths) {
json_last_update = json_object_new_object();
json_object_int_add(json_last_update, "epoch", tbuf);
@@ -11545,7 +11589,6 @@ void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp,
has_valid_label = bgp_is_valid_label(&label);
if (safi == SAFI_EVPN) {
-
if (!json) {
vty_out(vty, "BGP routing table entry for %s%s%pFX\n",
prd ? prefix_rd2str(prd, buf1, sizeof(buf1))
diff --git a/bgpd/bgp_snmp.c b/bgpd/bgp_snmp.c
index e25d8d90db..6bc313464a 100644
--- a/bgpd/bgp_snmp.c
+++ b/bgpd/bgp_snmp.c
@@ -588,7 +588,7 @@ static uint8_t *bgpPeerTable(struct variable *v, oid name[], size_t *length,
if (peer->uptime == 0)
return SNMP_INTEGER(0);
else
- return SNMP_INTEGER(bgp_clock() - peer->uptime);
+ return SNMP_INTEGER(monotime(NULL) - peer->uptime);
case BGPPEERCONNECTRETRYINTERVAL:
*write_method = write_bgpPeerTable;
return SNMP_INTEGER(peer->v_connect);
@@ -615,7 +615,7 @@ static uint8_t *bgpPeerTable(struct variable *v, oid name[], size_t *length,
if (peer->update_time == 0)
return SNMP_INTEGER(0);
else
- return SNMP_INTEGER(bgp_clock() - peer->update_time);
+ return SNMP_INTEGER(monotime(NULL) - peer->update_time);
default:
return NULL;
}
diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c
index f1173941a0..d713192d00 100644
--- a/bgpd/bgp_updgrp.c
+++ b/bgpd/bgp_updgrp.c
@@ -70,14 +70,14 @@
static void update_group_checkin(struct update_group *updgrp)
{
updgrp->id = ++bm->updgrp_idspace;
- updgrp->uptime = bgp_clock();
+ updgrp->uptime = monotime(NULL);
}
static void update_subgroup_checkin(struct update_subgroup *subgrp,
struct update_group *updgrp)
{
subgrp->id = ++bm->subgrp_idspace;
- subgrp->uptime = bgp_clock();
+ subgrp->uptime = monotime(NULL);
}
static void sync_init(struct update_subgroup *subgrp,
@@ -164,6 +164,12 @@ static void conf_copy(struct peer *dst, struct peer *src, afi_t afi,
dst->change_local_as = src->change_local_as;
dst->shared_network = src->shared_network;
dst->local_role = src->local_role;
+
+ if (src->soo[afi][safi]) {
+ ecommunity_free(&dst->soo[afi][safi]);
+ dst->soo[afi][safi] = ecommunity_dup(src->soo[afi][safi]);
+ }
+
memcpy(&(dst->nexthop), &(src->nexthop), sizeof(struct bgp_nexthop));
dst->group = src->group;
@@ -428,6 +434,12 @@ static unsigned int updgrp_hash_key_make(const void *p)
*/
key = jhash_1word(peer->local_role, key);
+ if (peer->soo[afi][safi]) {
+ char *soo_str = ecommunity_str(peer->soo[afi][safi]);
+
+ key = jhash_1word(jhash(soo_str, strlen(soo_str), SEED1), key);
+ }
+
if (bgp_debug_neighbor_events(peer)) {
zlog_debug(
"%pBP Update Group Hash: sort: %d UpdGrpFlags: %ju UpdGrpAFFlags: %u",
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index 0eba5ea447..881426d9c4 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -306,12 +306,16 @@ static int bgp_srv6_locator_unset(struct bgp *bgp)
return -1;
/* refresh chunks */
- for (ALL_LIST_ELEMENTS(bgp->srv6_locator_chunks, node, nnode, chunk))
+ for (ALL_LIST_ELEMENTS(bgp->srv6_locator_chunks, node, nnode, chunk)) {
listnode_delete(bgp->srv6_locator_chunks, chunk);
+ srv6_locator_chunk_free(chunk);
+ }
/* refresh functions */
- for (ALL_LIST_ELEMENTS(bgp->srv6_functions, node, nnode, func))
+ for (ALL_LIST_ELEMENTS(bgp->srv6_functions, node, nnode, func)) {
listnode_delete(bgp->srv6_functions, func);
+ XFREE(MTYPE_BGP_SRV6_FUNCTION, func);
+ }
/* refresh tovpn_sid */
for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp_vrf)) {
@@ -334,6 +338,20 @@ static int bgp_srv6_locator_unset(struct bgp *bgp)
/* update vpn bgp processes */
vpn_leak_postchange_all();
+ /* refresh tovpn_sid_locator */
+ for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp_vrf)) {
+ if (bgp_vrf->inst_type != BGP_INSTANCE_TYPE_VRF)
+ continue;
+
+ /* refresh vpnv4 tovpn_sid_locator */
+ XFREE(MTYPE_BGP_SRV6_SID,
+ bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator);
+
+ /* refresh vpnv6 tovpn_sid_locator */
+ XFREE(MTYPE_BGP_SRV6_SID,
+ bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator);
+ }
+
/* clear locator name */
memset(bgp->srv6_locator_name, 0, sizeof(bgp->srv6_locator_name));
@@ -8228,6 +8246,63 @@ ALIAS_HIDDEN(
"Only give warning message when limit is exceeded\n"
"Force checking all received routes not only accepted\n")
+/* "neighbor soo" */
+DEFPY (neighbor_soo,
+ neighbor_soo_cmd,
+ "neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor soo ASN:NN_OR_IP-ADDRESS:NN$soo",
+ NEIGHBOR_STR
+ NEIGHBOR_ADDR_STR2
+ "Set the Site-of-Origin (SoO) extended community\n"
+ "VPN extended community\n")
+{
+ struct peer *peer;
+ afi_t afi = bgp_node_afi(vty);
+ safi_t safi = bgp_node_safi(vty);
+ struct ecommunity *ecomm_soo;
+
+ peer = peer_and_group_lookup_vty(vty, neighbor);
+ if (!peer)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ ecomm_soo = ecommunity_str2com(soo, ECOMMUNITY_SITE_ORIGIN, 0);
+ if (!ecomm_soo) {
+ vty_out(vty, "%% Malformed SoO extended community\n");
+ return CMD_WARNING;
+ }
+ ecommunity_str(ecomm_soo);
+
+ if (!ecommunity_match(peer->soo[afi][safi], ecomm_soo)) {
+ ecommunity_free(&peer->soo[afi][safi]);
+ peer->soo[afi][safi] = ecomm_soo;
+ peer_af_flag_unset(peer, afi, safi, PEER_FLAG_SOO);
+ }
+
+ return bgp_vty_return(vty,
+ peer_af_flag_set(peer, afi, safi, PEER_FLAG_SOO));
+}
+
+DEFPY (no_neighbor_soo,
+ no_neighbor_soo_cmd,
+ "no neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor soo [ASN:NN_OR_IP-ADDRESS:NN$soo]",
+ NO_STR
+ NEIGHBOR_STR
+ NEIGHBOR_ADDR_STR2
+ "Set the Site-of-Origin (SoO) extended community\n"
+ "VPN extended community\n")
+{
+ struct peer *peer;
+ afi_t afi = bgp_node_afi(vty);
+ safi_t safi = bgp_node_safi(vty);
+
+ peer = peer_and_group_lookup_vty(vty, neighbor);
+ if (!peer)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ ecommunity_free(&peer->soo[afi][safi]);
+
+ return bgp_vty_return(
+ vty, peer_af_flag_unset(peer, afi, safi, PEER_FLAG_SOO));
+}
/* "neighbor allowas-in" */
DEFUN (neighbor_allowas_in,
@@ -9573,10 +9648,8 @@ DEFPY (show_bgp_srv6,
vty_out(vty, "locator_name: %s\n", bgp->srv6_locator_name);
vty_out(vty, "locator_chunks:\n");
- for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, chunk)) {
- prefix2str(&chunk->prefix, buf, sizeof(buf));
- vty_out(vty, "- %s\n", buf);
- }
+ for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, chunk))
+ vty_out(vty, "- %pFX\n", &chunk->prefix);
vty_out(vty, "functions:\n");
for (ALL_LIST_ELEMENTS_RO(bgp->srv6_functions, node, func)) {
@@ -12723,7 +12796,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
if (peer_established(p)) {
time_t uptime;
- uptime = bgp_clock();
+ uptime = monotime(NULL);
uptime -= p->uptime;
epoch_tbuf = time(NULL) - uptime;
@@ -12751,7 +12824,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
time_t uptime;
struct tm tm;
- uptime = bgp_clock();
+ uptime = monotime(NULL);
uptime -= p->readtime;
gmtime_r(&uptime, &tm);
@@ -12759,7 +12832,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
(tm.tm_sec * 1000) + (tm.tm_min * 60000)
+ (tm.tm_hour * 3600000));
- uptime = bgp_clock();
+ uptime = monotime(NULL);
uptime -= p->last_write;
gmtime_r(&uptime, &tm);
@@ -12767,7 +12840,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
(tm.tm_sec * 1000) + (tm.tm_min * 60000)
+ (tm.tm_hour * 3600000));
- uptime = bgp_clock();
+ uptime = monotime(NULL);
uptime -= p->update_time;
gmtime_r(&uptime, &tm);
@@ -14039,7 +14112,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
time_t uptime;
struct tm tm;
- uptime = bgp_clock();
+ uptime = monotime(NULL);
uptime -= p->resettime;
gmtime_r(&uptime, &tm);
@@ -17221,6 +17294,15 @@ static void bgp_config_write_peer_af(struct vty *vty, struct bgp *bgp,
}
}
+ /* soo */
+ if (peergroup_af_flag_check(peer, afi, safi, PEER_FLAG_SOO)) {
+ char *soo_str = ecommunity_ecom2str(
+ peer->soo[afi][safi], ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
+
+ vty_out(vty, " neighbor %s soo %s\n", addr, soo_str);
+ XFREE(MTYPE_ECOMMUNITY_STR, soo_str);
+ }
+
/* weight */
if (peergroup_af_flag_check(peer, afi, safi, PEER_FLAG_WEIGHT))
vty_out(vty, " neighbor %s weight %lu\n", addr,
@@ -19305,6 +19387,26 @@ void bgp_vty_init(void)
install_element(BGP_EVPN_NODE, &neighbor_allowas_in_cmd);
install_element(BGP_EVPN_NODE, &no_neighbor_allowas_in_cmd);
+ /* "neighbor soo" */
+ install_element(BGP_IPV4_NODE, &neighbor_soo_cmd);
+ install_element(BGP_IPV4_NODE, &no_neighbor_soo_cmd);
+ install_element(BGP_IPV4M_NODE, &neighbor_soo_cmd);
+ install_element(BGP_IPV4M_NODE, &no_neighbor_soo_cmd);
+ install_element(BGP_IPV4L_NODE, &neighbor_soo_cmd);
+ install_element(BGP_IPV4L_NODE, &no_neighbor_soo_cmd);
+ install_element(BGP_IPV6_NODE, &neighbor_soo_cmd);
+ install_element(BGP_IPV6_NODE, &no_neighbor_soo_cmd);
+ install_element(BGP_IPV6M_NODE, &neighbor_soo_cmd);
+ install_element(BGP_IPV6M_NODE, &no_neighbor_soo_cmd);
+ install_element(BGP_IPV6L_NODE, &neighbor_soo_cmd);
+ install_element(BGP_IPV6L_NODE, &no_neighbor_soo_cmd);
+ install_element(BGP_VPNV4_NODE, &neighbor_soo_cmd);
+ install_element(BGP_VPNV4_NODE, &no_neighbor_soo_cmd);
+ install_element(BGP_VPNV6_NODE, &neighbor_soo_cmd);
+ install_element(BGP_VPNV6_NODE, &no_neighbor_soo_cmd);
+ install_element(BGP_EVPN_NODE, &neighbor_soo_cmd);
+ install_element(BGP_EVPN_NODE, &no_neighbor_soo_cmd);
+
/* address-family commands. */
install_element(BGP_NODE, &address_family_ipv4_safi_cmd);
install_element(BGP_NODE, &address_family_ipv6_safi_cmd);
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index 9c9b88e125..85e25b88a4 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -456,7 +456,7 @@ static int bgp_interface_vrf_update(ZAPI_CALLBACK_ARGS)
if (!ifp)
return 0;
- if (BGP_DEBUG(zebra, ZEBRA) && ifp)
+ if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("Rx Intf VRF change VRF %u IF %s NewVRF %u", vrf_id,
ifp->name, new_vrf_id);
@@ -3209,7 +3209,7 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS)
struct srv6_locator_chunk *chunk;
struct bgp_srv6_function *func;
struct bgp *bgp_vrf;
- struct in6_addr *tovpn_sid;
+ struct in6_addr *tovpn_sid, *tovpn_sid_locator;
struct prefix_ipv6 tmp_prefi;
if (zapi_srv6_locator_decode(zclient->ibuf, &loc) < 0)
@@ -3218,8 +3218,10 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS)
// refresh chunks
for (ALL_LIST_ELEMENTS(bgp->srv6_locator_chunks, node, nnode, chunk))
if (prefix_match((struct prefix *)&loc.prefix,
- (struct prefix *)&chunk->prefix))
+ (struct prefix *)&chunk->prefix)) {
listnode_delete(bgp->srv6_locator_chunks, chunk);
+ srv6_locator_chunk_free(chunk);
+ }
// refresh functions
for (ALL_LIST_ELEMENTS(bgp->srv6_functions, node, nnode, func)) {
@@ -3227,8 +3229,10 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS)
tmp_prefi.prefixlen = 128;
tmp_prefi.prefix = func->sid;
if (prefix_match((struct prefix *)&loc.prefix,
- (struct prefix *)&tmp_prefi))
+ (struct prefix *)&tmp_prefi)) {
listnode_delete(bgp->srv6_functions, func);
+ XFREE(MTYPE_BGP_SRV6_FUNCTION, func);
+ }
}
// refresh tovpn_sid
@@ -3262,6 +3266,37 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS)
}
vpn_leak_postchange_all();
+
+ /* refresh tovpn_sid_locator */
+ for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp_vrf)) {
+ if (bgp_vrf->inst_type != BGP_INSTANCE_TYPE_VRF)
+ continue;
+
+ /* refresh vpnv4 tovpn_sid_locator */
+ tovpn_sid_locator =
+ bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator;
+ if (tovpn_sid_locator) {
+ tmp_prefi.family = AF_INET6;
+ tmp_prefi.prefixlen = IPV6_MAX_BITLEN;
+ tmp_prefi.prefix = *tovpn_sid_locator;
+ if (prefix_match((struct prefix *)&loc.prefix,
+ (struct prefix *)&tmp_prefi))
+ XFREE(MTYPE_BGP_SRV6_SID, tovpn_sid_locator);
+ }
+
+ /* refresh vpnv6 tovpn_sid_locator */
+ tovpn_sid_locator =
+ bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator;
+ if (tovpn_sid_locator) {
+ tmp_prefi.family = AF_INET6;
+ tmp_prefi.prefixlen = IPV6_MAX_BITLEN;
+ tmp_prefi.prefix = *tovpn_sid_locator;
+ if (prefix_match((struct prefix *)&loc.prefix,
+ (struct prefix *)&tmp_prefi))
+ XFREE(MTYPE_BGP_SRV6_SID, tovpn_sid_locator);
+ }
+ }
+
return 0;
}
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index c17bd76ad7..9a9e287da9 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -531,17 +531,6 @@ void bgp_cluster_id_unset(struct bgp *bgp)
}
}
-/* time_t value that is monotonicly increasing
- * and uneffected by adjustments to system clock
- */
-time_t bgp_clock(void)
-{
- struct timeval tv;
-
- monotime(&tv);
- return tv.tv_sec;
-}
-
/* BGP timer configuration. */
void bgp_timers_set(struct bgp *bgp, uint32_t keepalive, uint32_t holdtime,
uint32_t connect_retry, uint32_t delayopen)
@@ -1377,6 +1366,7 @@ struct peer *peer_new(struct bgp *bgp)
SET_FLAG(peer->af_flags_invert[afi][safi],
PEER_FLAG_SEND_LARGE_COMMUNITY);
peer->addpath_type[afi][safi] = BGP_ADDPATH_NONE;
+ peer->soo[afi][safi] = NULL;
}
/* set nexthop-unchanged for l2vpn evpn by default */
@@ -1483,6 +1473,11 @@ void peer_xfer_config(struct peer *peer_dst, struct peer *peer_src)
peer_dst->weight[afi][safi] = peer_src->weight[afi][safi];
peer_dst->addpath_type[afi][safi] =
peer_src->addpath_type[afi][safi];
+ if (peer_src->soo[afi][safi]) {
+ ecommunity_free(&peer_dst->soo[afi][safi]);
+ peer_dst->soo[afi][safi] =
+ ecommunity_dup(peer_src->soo[afi][safi]);
+ }
}
for (afidx = BGP_AF_START; afidx < BGP_AF_MAX; afidx++) {
@@ -1760,7 +1755,7 @@ struct peer *peer_create(union sockunion *su, const char *conf_if,
}
/* Last read and reset time set */
- peer->readtime = peer->resettime = bgp_clock();
+ peer->readtime = peer->resettime = monotime(NULL);
/* Default TTL set. */
peer->ttl = (peer->sort == BGP_PEER_IBGP) ? MAXTTL : BGP_DEFAULT_TTL;
@@ -2042,6 +2037,10 @@ static void peer_group2peer_config_copy_af(struct peer_group *group,
if (!CHECK_FLAG(pflags_ovrd, PEER_FLAG_ALLOWAS_IN))
PEER_ATTR_INHERIT(peer, group, allowas_in[afi][safi]);
+ /* soo */
+ if (!CHECK_FLAG(pflags_ovrd, PEER_FLAG_SOO))
+ PEER_ATTR_INHERIT(peer, group, soo[afi][safi]);
+
/* weight */
if (!CHECK_FLAG(pflags_ovrd, PEER_FLAG_WEIGHT))
PEER_ATTR_INHERIT(peer, group, weight[afi][safi]);
@@ -2548,6 +2547,7 @@ int peer_delete(struct peer *peer)
XFREE(MTYPE_BGP_FILTER_NAME, filter->usmap.name);
XFREE(MTYPE_ROUTE_MAP_NAME, peer->default_rmap[afi][safi].name);
+ ecommunity_free(&peer->soo[afi][safi]);
}
FOREACH_AFI_SAFI (afi, safi)
@@ -4017,7 +4017,6 @@ struct peer *peer_lookup_dynamic_neighbor(struct bgp *bgp, union sockunion *su)
struct prefix prefix;
struct prefix *listen_range;
int dncount;
- char buf[PREFIX2STR_BUFFER];
if (!sockunion2hostprefix(su, &prefix))
return NULL;
@@ -4034,21 +4033,19 @@ struct peer *peer_lookup_dynamic_neighbor(struct bgp *bgp, union sockunion *su)
if (!gbgp)
return NULL;
- prefix2str(&prefix, buf, sizeof(buf));
-
if (bgp_debug_neighbor_events(NULL))
zlog_debug(
- "Dynamic Neighbor %s matches group %s listen range %pFX",
- buf, group->name, listen_range);
+ "Dynamic Neighbor %pFX matches group %s listen range %pFX",
+ &prefix, group->name, listen_range);
/* Are we within the listen limit? */
dncount = gbgp->dynamic_neighbors_count;
if (dncount >= gbgp->dynamic_neighbors_limit) {
if (bgp_debug_neighbor_events(NULL))
- zlog_debug("Dynamic Neighbor %s rejected - at limit %d",
- inet_sutop(su, buf),
- gbgp->dynamic_neighbors_limit);
+ zlog_debug(
+ "Dynamic Neighbor %pFX rejected - at limit %d",
+ &prefix, gbgp->dynamic_neighbors_limit);
return NULL;
}
@@ -4056,8 +4053,8 @@ struct peer *peer_lookup_dynamic_neighbor(struct bgp *bgp, union sockunion *su)
if (CHECK_FLAG(group->conf->flags, PEER_FLAG_SHUTDOWN)) {
if (bgp_debug_neighbor_events(NULL))
zlog_debug(
- "Dynamic Neighbor %s rejected - group %s disabled",
- buf, group->name);
+ "Dynamic Neighbor %pFX rejected - group %s disabled",
+ &prefix, group->name);
return NULL;
}
@@ -4065,8 +4062,8 @@ struct peer *peer_lookup_dynamic_neighbor(struct bgp *bgp, union sockunion *su)
if (!peer_group_af_configured(group)) {
if (bgp_debug_neighbor_events(NULL))
zlog_debug(
- "Dynamic Neighbor %s rejected - no AF activated for group %s",
- buf, group->name);
+ "Dynamic Neighbor %pFX rejected - no AF activated for group %s",
+ &prefix, group->name);
return NULL;
}
@@ -4278,6 +4275,7 @@ static const struct peer_flag_action peer_af_flag_action_list[] = {
{PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE, 1, peer_change_reset_out},
{PEER_FLAG_WEIGHT, 0, peer_change_reset_in},
{PEER_FLAG_DISABLE_ADDPATH_RX, 0, peer_change_reset},
+ {PEER_FLAG_SOO, 0, peer_change_reset},
{0, 0, 0}};
/* Proper action set. */
@@ -7960,7 +7958,7 @@ char *peer_uptime(time_t uptime2, char *buf, size_t len, bool use_json,
}
/* Get current time. */
- uptime1 = bgp_clock();
+ uptime1 = monotime(NULL);
uptime1 -= uptime2;
gmtime_r(&uptime1, &tm);
@@ -8002,7 +8000,7 @@ void bgp_master_init(struct thread_master *master, const int buffer_size,
bm->port = BGP_PORT_DEFAULT;
bm->addresses = addresses;
bm->master = master;
- bm->start_time = bgp_clock();
+ bm->start_time = monotime(NULL);
bm->t_rmap_update = NULL;
bm->rmap_update_timer = RMAP_DEFAULT_UPDATE_TIMER;
bm->v_update_delay = BGP_UPDATE_DELAY_DEF;
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 8348b37b8e..1d04ccee42 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -1411,6 +1411,7 @@ struct peer {
#define PEER_FLAG_MAX_PREFIX_OUT (1U << 27) /* outgoing maximum prefix */
#define PEER_FLAG_MAX_PREFIX_FORCE (1U << 28) /* maximum-prefix <num> force */
#define PEER_FLAG_DISABLE_ADDPATH_RX (1U << 29) /* disable-addpath-rx */
+#define PEER_FLAG_SOO (1U << 30) /* soo */
enum bgp_addpath_strat addpath_type[AFI_MAX][SAFI_MAX];
@@ -1620,6 +1621,9 @@ struct peer {
/* allowas-in. */
char allowas_in[AFI_MAX][SAFI_MAX];
+ /* soo */
+ struct ecommunity *soo[AFI_MAX][SAFI_MAX];
+
/* weight */
unsigned long weight[AFI_MAX][SAFI_MAX];
@@ -2043,7 +2047,6 @@ extern unsigned int multipath_num;
/* Prototypes. */
extern void bgp_terminate(void);
extern void bgp_reset(void);
-extern time_t bgp_clock(void);
extern void bgp_zclient_reset(void);
extern struct bgp *bgp_get_default(void);
extern struct bgp *bgp_lookup(as_t, const char *);
@@ -2449,7 +2452,7 @@ static inline int peer_group_af_configured(struct peer_group *group)
static inline char *timestamp_string(time_t ts)
{
time_t tbuf;
- tbuf = time(NULL) - (bgp_clock() - ts);
+ tbuf = time(NULL) - (monotime(NULL) - ts);
return ctime(&tbuf);
}
diff --git a/bgpd/rfapi/bgp_rfapi_cfg.c b/bgpd/rfapi/bgp_rfapi_cfg.c
index 2437bd8cfe..831f92996a 100644
--- a/bgpd/rfapi/bgp_rfapi_cfg.c
+++ b/bgpd/rfapi/bgp_rfapi_cfg.c
@@ -94,7 +94,7 @@ DEFINE_QOBJ_TYPE(rfapi_l2_group_cfg);
*/
time_t rfapi_time(time_t *t)
{
- time_t clock = bgp_clock();
+ time_t clock = monotime(NULL);
if (t)
*t = clock;
return clock;
diff --git a/bgpd/rfapi/rfapi.c b/bgpd/rfapi/rfapi.c
index 382886e0bd..a34c10d842 100644
--- a/bgpd/rfapi/rfapi.c
+++ b/bgpd/rfapi/rfapi.c
@@ -1006,7 +1006,7 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */
bgp_aggregate_decrement(bgp, p, bpi, afi, safi);
bgp_attr_unintern(&bpi->attr);
bpi->attr = new_attr;
- bpi->uptime = bgp_clock();
+ bpi->uptime = monotime(NULL);
if (safi == SAFI_MPLS_VPN) {
@@ -1351,8 +1351,7 @@ int rfapi_init_and_open(struct bgp *bgp, struct rfapi_descriptor *rfd,
struct prefix pfx_un;
struct agg_node *rn;
-
- rfapi_time(&rfd->open_time);
+ rfd->open_time = monotime(NULL);
if (rfg->type == RFAPI_GROUP_CFG_VRF)
SET_FLAG(rfd->flags, RFAPI_HD_FLAG_IS_VRF);
@@ -1521,10 +1520,10 @@ rfapi_query_inner(void *handle, struct rfapi_ip_addr *target,
}
rfd->rsp_counter++; /* dedup: identify this generation */
- rfd->rsp_time = rfapi_time(NULL); /* response content dedup */
+ rfd->rsp_time = monotime(NULL); /* response content dedup */
rfd->ftd_last_allowed_time =
- bgp_clock()
- - bgp->rfapi_cfg->rfp_cfg.ftd_advertisement_interval;
+ monotime(NULL) -
+ bgp->rfapi_cfg->rfp_cfg.ftd_advertisement_interval;
if (l2o) {
if (!memcmp(l2o->macaddr.octet, rfapi_ethaddr0.octet,
diff --git a/bgpd/rfapi/rfapi_import.c b/bgpd/rfapi/rfapi_import.c
index 1d42702769..be64153cef 100644
--- a/bgpd/rfapi/rfapi_import.c
+++ b/bgpd/rfapi/rfapi_import.c
@@ -489,7 +489,7 @@ static struct bgp_path_info *rfapiBgpInfoCreate(struct attr *attr,
bgp_path_info_extra_get(new);
if (prd) {
new->extra->vnc.import.rd = *prd;
- rfapi_time(&new->extra->vnc.import.create_time);
+ new->extra->vnc.import.create_time = monotime(NULL);
}
if (label)
encode_label(*label, &new->extra->label[0]);
@@ -3750,7 +3750,7 @@ void rfapiBgpInfoFilteredImportVPN(
remote_peer_match = 1;
}
- if (!un_match & !remote_peer_match)
+ if (!un_match && !remote_peer_match)
continue;
vnc_zlog_debug_verbose(
diff --git a/bgpd/rfapi/rfapi_private.h b/bgpd/rfapi/rfapi_private.h
index bc0e192ae2..8c76e1dd0b 100644
--- a/bgpd/rfapi/rfapi_private.h
+++ b/bgpd/rfapi/rfapi_private.h
@@ -364,6 +364,11 @@ extern int rfapi_extract_l2o(
* compaitibility to old quagga_time call
* time_t value in terms of stabilised absolute time.
* replacement for POSIX time()
+ *
+ * Please do not use this. It is kept only because
+ * Lou's CI compiles against some private bgp code
+ * that would fail to compile without it.
+ * Use monotime() instead.
*/
extern time_t rfapi_time(time_t *t);
diff --git a/bgpd/rfapi/rfapi_rib.c b/bgpd/rfapi/rfapi_rib.c
index 9d61ada7db..9e13c48134 100644
--- a/bgpd/rfapi/rfapi_rib.c
+++ b/bgpd/rfapi/rfapi_rib.c
@@ -784,7 +784,7 @@ int rfapiRibPreloadBi(
skiplist_insert(slRibPt, &ori->rk, ori);
}
- ori->last_sent_time = rfapi_time(NULL);
+ ori->last_sent_time = monotime(NULL);
/*
* poke timer
@@ -797,7 +797,7 @@ int rfapiRibPreloadBi(
* Update last sent time for prefix
*/
trn = agg_node_get(rfd->rsp_times[afi], p); /* locks trn */
- trn->info = (void *)(uintptr_t)bgp_clock();
+ trn->info = (void *)(uintptr_t)monotime(NULL);
if (agg_node_get_lock_count(trn) > 1)
agg_unlock_node(trn);
@@ -1089,7 +1089,7 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd,
rfapiFreeBgpTeaOptionChain(ori->tea_options);
ori->tea_options =
rfapiOptionsDup(ri->tea_options);
- ori->last_sent_time = rfapi_time(NULL);
+ ori->last_sent_time = monotime(NULL);
rfapiFreeRfapiVnOptionChain(ori->vn_options);
ori->vn_options =
@@ -1115,7 +1115,7 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd,
ori->lifetime = ri->lifetime;
ori->tea_options =
rfapiOptionsDup(ri->tea_options);
- ori->last_sent_time = rfapi_time(NULL);
+ ori->last_sent_time = monotime(NULL);
ori->vn_options =
rfapiVnOptionsDup(ri->vn_options);
ori->un_options =
@@ -1227,7 +1227,7 @@ callback:
*/
trn = agg_node_get(rfd->rsp_times[afi],
p); /* locks trn */
- trn->info = (void *)(uintptr_t)bgp_clock();
+ trn->info = (void *)(uintptr_t)monotime(NULL);
if (agg_node_get_lock_count(trn) > 1)
agg_unlock_node(trn);
@@ -1376,7 +1376,7 @@ callback:
rfapiRibStartTimer(rfd, ri, rn, 1);
RFAPI_RIB_CHECK_COUNTS(
0, delete_list->count);
- ri->last_sent_time = rfapi_time(NULL);
+ ri->last_sent_time = monotime(NULL);
#if DEBUG_RIB_SL_RD
{
char buf_rd[RD_ADDRSTRLEN];
@@ -1400,7 +1400,7 @@ callback:
rfapiRibStartTimer(rfd, ri_del, rn, 1);
RFAPI_RIB_CHECK_COUNTS(
0, delete_list->count);
- ri->last_sent_time = rfapi_time(NULL);
+ ri->last_sent_time = monotime(NULL);
}
}
} else {
@@ -1849,7 +1849,7 @@ rfapiRibPreload(struct bgp *bgp, struct rfapi_descriptor *rfd,
vnc_zlog_debug_verbose("%s: loading response=%p, use_eth_resolution=%d",
__func__, response, use_eth_resolution);
- new_last_sent_time = rfapi_time(NULL);
+ new_last_sent_time = monotime(NULL);
for (nhp = response; nhp; nhp = nhp_next) {
@@ -2019,7 +2019,7 @@ rfapiRibPreload(struct bgp *bgp, struct rfapi_descriptor *rfd,
ri->lifetime = nhp->lifetime;
ri->vn_options = rfapiVnOptionsDup(nhp->vn_options);
ri->rsp_counter = rfd->rsp_counter;
- ri->last_sent_time = rfapi_time(NULL);
+ ri->last_sent_time = monotime(NULL);
if (need_insert) {
int rc;
@@ -2042,7 +2042,7 @@ rfapiRibPreload(struct bgp *bgp, struct rfapi_descriptor *rfd,
* update this NVE's timestamp for this prefix
*/
trn = agg_node_get(rfd->rsp_times[afi], &pfx); /* locks trn */
- trn->info = (void *)(uintptr_t)bgp_clock();
+ trn->info = (void *)(uintptr_t)monotime(NULL);
if (agg_node_get_lock_count(trn) > 1)
agg_unlock_node(trn);
@@ -2275,7 +2275,7 @@ static int print_rib_sl(int (*fp)(void *, const char *, ...), struct vty *vty,
rfapiFormatAge(ri->last_sent_time, str_age, BUFSIZ);
#else
{
- time_t now = rfapi_time(NULL);
+ time_t now = monotime(NULL);
time_t expire =
ri->last_sent_time + (time_t)ri->lifetime;
/* allow for delayed/async removal */
diff --git a/bgpd/rfapi/rfapi_vty.c b/bgpd/rfapi/rfapi_vty.c
index c8fdadcac9..a8ab618417 100644
--- a/bgpd/rfapi/rfapi_vty.c
+++ b/bgpd/rfapi/rfapi_vty.c
@@ -109,7 +109,7 @@ char *rfapiFormatAge(time_t age, char *buf, size_t len)
{
time_t now, age_adjusted;
- now = rfapi_time(NULL);
+ now = monotime(NULL);
age_adjusted = now - age;
return rfapiFormatSeconds(age_adjusted, buf, len);
diff --git a/configure.ac b/configure.ac
index b7e17d3565..8c1fab0eab 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1372,7 +1372,7 @@ case "${enable_vtysh}" in
AC_DEFINE([VTYSH], [1], [VTY shell])
prev_libs="$LIBS"
- AC_CHECK_LIB([readline], [main], [
+ AC_CHECK_LIB([readline], [readline], [
LIBREADLINE="-lreadline"
], [
dnl readline failed - it might be incorrectly linked and missing its
diff --git a/debian/control b/debian/control
index e8bf1a8ffa..06c16cc945 100644
--- a/debian/control
+++ b/debian/control
@@ -30,6 +30,7 @@ Build-Depends: bison,
python3-pytest <!nocheck>,
python3-sphinx,
texinfo (>= 4.7),
+ lua5.3 <pkg.frr.lua>,
liblua5.3-dev <pkg.frr.lua>
Standards-Version: 4.5.0.3
Homepage: https://www.frrouting.org/
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index e31bfe7bfa..033b753639 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -2490,6 +2490,9 @@ Extended Community Lists
there is no matched entry, deny will be returned. When `extcommunity` is
empty it matches to any routes.
+ Special handling is applied for the ``internet`` community: it matches
+ any community.
+
.. clicmd:: bgp extcommunity-list expanded NAME permit|deny LINE
This command defines a new expanded extcommunity-list. `line` is a string
@@ -2832,6 +2835,19 @@ of the global VPNv4/VPNv6 family. This command defaults to on and is not
displayed.
The `no bgp retain route-target all` form of the command is displayed.
+.. clicmd:: neighbor <A.B.C.D|X:X::X:X|WORD> soo EXTCOMMUNITY
+
+Without this command, SoO extended community attribute is configured using
+an inbound route map that sets the SoO value during the update process.
+With the introduction of the new BGP per-neighbor Site-of-Origin (SoO) feature,
+two new commands configured in sub-modes under router configuration mode
+simplify the SoO value configuration.
+
+If we configure SoO per neighbor at PEs, the SoO community is automatically
+added for all routes from the CPEs. Routes are validated and prevented from
+being sent back to the same CPE (e.g.: multi-site). This is especially needed
+when using ``as-override`` or ``allowas-in`` to prevent routing loops.
+
.. _bgp-l3vpn-srv6:
L3VPN SRv6
diff --git a/doc/user/pimv6.rst b/doc/user/pimv6.rst
index 851e58b814..843734e217 100644
--- a/doc/user/pimv6.rst
+++ b/doc/user/pimv6.rst
@@ -195,7 +195,7 @@ is in a vrf, enter the interface command with the vrf keyword at the end.
Set the MLD last member query count. The default value is 2. 'no' form of
this command is used to configure back to the default value.
-.. clicmd:: ipv6 MLD last-member-query-interval (1-65535)
+.. clicmd:: ipv6 mld last-member-query-interval (1-65535)
Set the MLD last member query interval in deciseconds. The default value is
10 deciseconds. 'no' form of this command is used to to configure back to the
@@ -325,6 +325,9 @@ MLD state
a MLDv2 querier. MLDv1 joins are recorded as "untracked" and shown in the
``NonTrkSeen`` output column.
+.. clicmd:: show ipv6 mld [vrf NAME] groups [json]
+
+ Display MLD group information.
General multicast routing state
-------------------------------
diff --git a/gdb/lib.txt b/gdb/lib.txt
index 913b455ed1..b44c237985 100644
--- a/gdb/lib.txt
+++ b/gdb/lib.txt
@@ -293,3 +293,25 @@ Arguments:
1st: A (struct route_node *) to the top of the route table.
2nd: The (struct route_node *) to walk up from
end
+
+define mg_walk
+ set $mg = (struct memgroup *)$arg0
+
+ while ($mg)
+ printf "showing active allocations in memory group %s\n", $mg->name
+ set $mt = (struct memtype *)$mg->types
+ while ($mt)
+ printf "memstats: %s:%zu\n", $mt->name, $mt->n_alloc
+ set $mt = $mt->next
+ end
+ set $mg = $mg->next
+ end
+
+document mg_walk
+Walk the memory data structures to show what is holding memory.
+
+Arguments:
+1st: A (struct memgroup *) where to start the walk.  If you are not
+     sure where to start, pass it mg_first, which is a global data
+     structure tracking all memory allocated in FRR.
+end
diff --git a/lib/sigevent.c b/lib/sigevent.c
index 0f20bc0270..985bedeb92 100644
--- a/lib/sigevent.c
+++ b/lib/sigevent.c
@@ -134,8 +134,7 @@ int frr_sigevent_process(void)
#ifdef SIGEVENT_BLOCK_SIGNALS
if (sigprocmask(SIG_UNBLOCK, &oldmask, NULL) < 0)
- ;
- return -1;
+ return -1;
#endif /* SIGEVENT_BLOCK_SIGNALS */
return 0;
diff --git a/lib/thread.c b/lib/thread.c
index c3613b5b0e..9eac9b410a 100644
--- a/lib/thread.c
+++ b/lib/thread.c
@@ -1354,9 +1354,9 @@ static void do_thread_cancel(struct thread_master *master)
struct thread_list_head *list = NULL;
struct thread **thread_array = NULL;
struct thread *thread;
-
struct cancel_req *cr;
struct listnode *ln;
+
for (ALL_LIST_ELEMENTS_RO(master->cancel_req, ln, cr)) {
/*
* If this is an event object cancellation, search
@@ -1379,6 +1379,9 @@ static void do_thread_cancel(struct thread_master *master)
if (!thread)
continue;
+ list = NULL;
+ thread_array = NULL;
+
/* Determine the appropriate queue to cancel the thread from */
switch (thread->type) {
case THREAD_READ:
diff --git a/lib/zclient.c b/lib/zclient.c
index e556b768ac..8ec82ab7bb 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -447,7 +447,7 @@ enum zclient_send_status zclient_send_localsid(struct zclient *zclient,
{
struct prefix_ipv6 p = {};
struct zapi_route api = {};
- struct nexthop nh = {};
+ struct zapi_nexthop *znh;
p.family = AF_INET6;
p.prefixlen = IPV6_MAX_BITLEN;
@@ -465,12 +465,16 @@ enum zclient_send_status zclient_send_localsid(struct zclient *zclient,
SET_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION);
SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
- nh.type = NEXTHOP_TYPE_IFINDEX;
- nh.ifindex = oif;
- SET_FLAG(nh.flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL);
- nexthop_add_srv6_seg6local(&nh, action, context);
+ znh = &api.nexthops[0];
+
+ memset(znh, 0, sizeof(*znh));
+
+ znh->type = NEXTHOP_TYPE_IFINDEX;
+ znh->ifindex = oif;
+ SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL);
+ znh->seg6local_action = action;
+ memcpy(&znh->seg6local_ctx, context, sizeof(struct seg6local_context));
- zapi_nexthop_from_nexthop(&api.nexthops[0], &nh);
api.nexthop_num = 1;
return zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);
diff --git a/nhrpd/nhrp_interface.c b/nhrpd/nhrp_interface.c
index 1092ce13a1..4ac30a7d75 100644
--- a/nhrpd/nhrp_interface.c
+++ b/nhrpd/nhrp_interface.c
@@ -165,8 +165,7 @@ static void nhrp_interface_interface_notifier(struct notifier_block *n,
switch (cmd) {
case NOTIFY_INTERFACE_CHANGED:
- nhrp_interface_update_mtu(nifp->ifp, AFI_IP);
- nhrp_interface_update_source(nifp->ifp);
+ nhrp_interface_update_nbma(nifp->ifp, NULL);
break;
case NOTIFY_INTERFACE_ADDRESS_CHANGED:
nifp->nbma = nbmanifp->afi[AFI_IP].addr;
diff --git a/ospf6d/ospf6_abr.c b/ospf6d/ospf6_abr.c
index 5af1139d9b..e9c42bb80c 100644
--- a/ospf6d/ospf6_abr.c
+++ b/ospf6d/ospf6_abr.c
@@ -488,7 +488,12 @@ int ospf6_abr_originate_summary_to_area(struct ospf6_route *route,
zlog_debug(
"Suppressed by range %pFX of area %s",
&range->prefix, route_area->name);
- ospf6_abr_delete_route(summary, summary_table, old);
+ /* The existing summary route could be a range, don't
+ * remove it in this case
+ */
+ if (summary && summary->type != OSPF6_DEST_TYPE_RANGE)
+ ospf6_abr_delete_route(summary, summary_table,
+ old);
return 0;
}
}
diff --git a/ospfd/ospf_lsa.c b/ospfd/ospf_lsa.c
index 278f263da3..1362554715 100644
--- a/ospfd/ospf_lsa.c
+++ b/ospfd/ospf_lsa.c
@@ -2865,10 +2865,11 @@ struct ospf_lsa *ospf_lsa_install(struct ospf *ospf, struct ospf_interface *oi,
* So, router should be aborted from HELPER role
* if it is detected as TOPO change.
*/
- if (ospf->active_restarter_cnt
- && CHECK_LSA_TYPE_1_TO_5_OR_7(lsa->data->type)
- && ospf_lsa_different(old, lsa, true))
- ospf_helper_handle_topo_chg(ospf, lsa);
+ if (ospf->active_restarter_cnt &&
+ CHECK_LSA_TYPE_1_TO_5_OR_7(lsa->data->type)) {
+ if (old == NULL || ospf_lsa_different(old, lsa, true))
+ ospf_helper_handle_topo_chg(ospf, lsa);
+ }
rt_recalc = 1;
}
diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c
index a6572794aa..7d72487686 100644
--- a/ospfd/ospf_vty.c
+++ b/ospfd/ospf_vty.c
@@ -10118,6 +10118,21 @@ static int ospf_print_vty_helper_dis_rtr_walkcb(struct hash_bucket *bucket,
return HASHWALK_CONTINUE;
}
+static int ospf_print_json_helper_enabled_rtr_walkcb(struct hash_bucket *bucket,
+ void *arg)
+{
+ struct advRtr *rtr = bucket->data;
+ struct json_object *json_rid_array = arg;
+ struct json_object *json_rid;
+
+ json_rid = json_object_new_object();
+
+ json_object_string_addf(json_rid, "routerId", "%pI4", &rtr->advRtrAddr);
+ json_object_array_add(json_rid_array, json_rid);
+
+ return HASHWALK_CONTINUE;
+}
+
static int ospf_show_gr_helper_details(struct vty *vty, struct ospf *ospf,
uint8_t use_vrf, json_object *json,
bool uj, bool detail)
@@ -10237,6 +10252,18 @@ CPP_NOTICE("Remove JSON object commands with keys starting with capital")
if (ospf->active_restarter_cnt)
json_object_int_add(json_vrf, "activeRestarterCnt",
ospf->active_restarter_cnt);
+
+ if (OSPF_HELPER_ENABLE_RTR_COUNT(ospf)) {
+ struct json_object *json_rid_array =
+ json_object_new_array();
+
+ json_object_object_add(json_vrf, "enabledRouterIds",
+ json_rid_array);
+
+ hash_walk(ospf->enable_rtr_list,
+ ospf_print_json_helper_enabled_rtr_walkcb,
+ json_rid_array);
+ }
}
diff --git a/pimd/mtracebis_netlink.c b/pimd/mtracebis_netlink.c
index fe2cb56a26..81e28f2407 100644
--- a/pimd/mtracebis_netlink.c
+++ b/pimd/mtracebis_netlink.c
@@ -92,7 +92,7 @@ int rtnl_open_byproto(struct rtnl_handle *rth, unsigned subscriptions,
rth->local.nl_family);
return -1;
}
- rth->seq = time(NULL);
+ rth->seq = (uint32_t)time(NULL);
return 0;
}
diff --git a/pimd/pim6_mld.c b/pimd/pim6_mld.c
index c5c98d8024..4b10c4c9c1 100644
--- a/pimd/pim6_mld.c
+++ b/pimd/pim6_mld.c
@@ -33,12 +33,15 @@
#include "lib/prefix.h"
#include "lib/checksum.h"
#include "lib/thread.h"
+#include "termtable.h"
#include "pimd/pim6_mld.h"
#include "pimd/pim6_mld_protocol.h"
#include "pimd/pim_memory.h"
#include "pimd/pim_instance.h"
#include "pimd/pim_iface.h"
+#include "pimd/pim6_cmd.h"
+#include "pimd/pim_cmd_common.h"
#include "pimd/pim_util.h"
#include "pimd/pim_tib.h"
#include "pimd/pimd.h"
@@ -415,7 +418,7 @@ static void gm_sg_update(struct gm_sg *sg, bool has_expired)
gm_sg_timer_start(gm_ifp, sg, timers.expire_wait);
THREAD_OFF(sg->t_sg_query);
- sg->n_query = gm_ifp->cur_qrv;
+ sg->n_query = gm_ifp->cur_lmqc;
sg->query_sbit = false;
gm_trigger_specific(sg);
}
@@ -2088,11 +2091,12 @@ static void gm_start(struct interface *ifp)
else
gm_ifp->cur_version = GM_MLDV2;
- /* hardcoded for dev without CLI */
- gm_ifp->cur_qrv = 2;
+ gm_ifp->cur_qrv = pim_ifp->gm_default_robustness_variable;
gm_ifp->cur_query_intv = pim_ifp->gm_default_query_interval * 1000;
- gm_ifp->cur_query_intv_trig = gm_ifp->cur_query_intv;
- gm_ifp->cur_max_resp = 250;
+ gm_ifp->cur_query_intv_trig =
+ pim_ifp->gm_specific_query_max_response_time_dsec * 100;
+ gm_ifp->cur_max_resp = pim_ifp->gm_query_max_response_time_dsec * 100;
+ gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;
gm_ifp->cfg_timing_fuzz.tv_sec = 0;
gm_ifp->cfg_timing_fuzz.tv_usec = 10 * 1000;
@@ -2246,8 +2250,16 @@ void gm_ifp_update(struct interface *ifp)
return;
}
- if (!pim_ifp->mld)
+ /*
+ * If ipv6 mld is not enabled on interface, do not start mld activities.
+ */
+ if (!pim_ifp->gm_enable)
+ return;
+
+ if (!pim_ifp->mld) {
+ changed = true;
gm_start(ifp);
+ }
gm_ifp = pim_ifp->mld;
if (IPV6_ADDR_CMP(&pim_ifp->ll_lowest, &gm_ifp->cur_ll_lowest))
@@ -2257,10 +2269,26 @@ void gm_ifp_update(struct interface *ifp)
if (gm_ifp->cur_query_intv != cfg_query_intv) {
gm_ifp->cur_query_intv = cfg_query_intv;
- gm_ifp->cur_query_intv_trig = cfg_query_intv;
changed = true;
}
+ unsigned int cfg_query_intv_trig =
+ pim_ifp->gm_specific_query_max_response_time_dsec * 100;
+
+ if (gm_ifp->cur_query_intv_trig != cfg_query_intv_trig) {
+ gm_ifp->cur_query_intv_trig = cfg_query_intv_trig;
+ changed = true;
+ }
+
+ unsigned int cfg_max_response =
+ pim_ifp->gm_query_max_response_time_dsec * 100;
+
+ if (gm_ifp->cur_max_resp != cfg_max_response)
+ gm_ifp->cur_max_resp = cfg_max_response;
+
+ if (gm_ifp->cur_lmqc != pim_ifp->gm_last_member_query_count)
+ gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;
+
enum gm_version cfg_version;
if (pim_ifp->mld_version == 1)
@@ -2306,8 +2334,6 @@ void gm_group_delete(struct gm_if *gm_ifp)
#include "pimd/pim6_mld_clippy.c"
#endif
-#define MLD_STR "Multicast Listener Discovery\n"
-
static struct vrf *gm_cmd_vrf_lookup(struct vty *vty, const char *vrf_str,
int *err)
{
@@ -2847,6 +2873,125 @@ DEFPY(gm_show_interface_joins,
return vty_json(vty, js);
}
+static void gm_show_groups(struct vty *vty, struct vrf *vrf, bool uj)
+{
+ struct interface *ifp;
+ struct ttable *tt = NULL;
+ char *table;
+ json_object *json = NULL;
+ json_object *json_iface = NULL;
+ json_object *json_group = NULL;
+ json_object *json_groups = NULL;
+ struct pim_instance *pim = vrf->info;
+
+ if (uj) {
+ json = json_object_new_object();
+ json_object_int_add(json, "totalGroups", pim->gm_group_count);
+ json_object_int_add(json, "watermarkLimit",
+ pim->gm_watermark_limit);
+ } else {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "Interface|Group|Version|Uptime");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+
+ vty_out(vty, "Total MLD groups: %u\n", pim->gm_group_count);
+ vty_out(vty, "Watermark warn limit(%s): %u\n",
+ pim->gm_watermark_limit ? "Set" : "Not Set",
+ pim->gm_watermark_limit);
+ }
+
+ /* scan interfaces */
+ FOR_ALL_INTERFACES (vrf, ifp) {
+
+ struct pim_interface *pim_ifp = ifp->info;
+ struct gm_if *gm_ifp;
+ struct gm_sg *sg;
+
+ if (!pim_ifp)
+ continue;
+
+ gm_ifp = pim_ifp->mld;
+ if (!gm_ifp)
+ continue;
+
+ /* scan mld groups */
+ frr_each (gm_sgs, gm_ifp->sgs, sg) {
+
+ if (uj) {
+ json_object_object_get_ex(json, ifp->name,
+ &json_iface);
+
+ if (!json_iface) {
+ json_iface = json_object_new_object();
+ json_object_pim_ifp_add(json_iface,
+ ifp);
+ json_object_object_add(json, ifp->name,
+ json_iface);
+ json_groups = json_object_new_array();
+ json_object_object_add(json_iface,
+ "groups",
+ json_groups);
+ }
+
+ json_group = json_object_new_object();
+ json_object_string_addf(json_group, "group",
+ "%pPAs",
+ &sg->sgaddr.grp);
+
+ json_object_int_add(json_group, "version",
+ pim_ifp->mld_version);
+ json_object_string_addf(json_group, "uptime",
+ "%pTVMs", &sg->created);
+ json_object_array_add(json_groups, json_group);
+ } else {
+ ttable_add_row(tt, "%s|%pPAs|%d|%pTVMs",
+ ifp->name, &sg->sgaddr.grp,
+ pim_ifp->mld_version,
+ &sg->created);
+ }
+ } /* scan gm groups */
+ } /* scan interfaces */
+
+ if (uj)
+ vty_json(vty, json);
+ else {
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
+}
+
+DEFPY(gm_show_mld_groups,
+ gm_show_mld_groups_cmd,
+ "show ipv6 mld [vrf <VRF|all>$vrf_str] groups [json$json]",
+ SHOW_STR
+ IPV6_STR
+ MLD_STR
+ VRF_FULL_CMD_HELP_STR
+ MLD_GROUP_STR
+ JSON_STR)
+{
+ int ret = CMD_SUCCESS;
+ struct vrf *vrf;
+
+ vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
+ if (ret != CMD_SUCCESS)
+ return ret;
+
+ if (vrf)
+ gm_show_groups(vty, vrf, !!json);
+ else
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
+ gm_show_groups(vty, vrf, !!json);
+
+ return CMD_SUCCESS;
+}
+
DEFPY(gm_debug_show,
gm_debug_show_cmd,
"debug show mld interface IFNAME",
@@ -3021,6 +3166,7 @@ void gm_cli_init(void)
install_element(VIEW_NODE, &gm_show_interface_cmd);
install_element(VIEW_NODE, &gm_show_interface_stats_cmd);
install_element(VIEW_NODE, &gm_show_interface_joins_cmd);
+ install_element(VIEW_NODE, &gm_show_mld_groups_cmd);
install_element(VIEW_NODE, &gm_debug_show_cmd);
install_element(INTERFACE_NODE, &gm_debug_iface_cfg_cmd);
diff --git a/pimd/pim6_mld.h b/pimd/pim6_mld.h
index 95523c2922..540d2e1899 100644
--- a/pimd/pim6_mld.h
+++ b/pimd/pim6_mld.h
@@ -324,6 +324,7 @@ struct gm_if {
unsigned int cur_query_intv_trig; /* ms */
unsigned int cur_max_resp; /* ms */
enum gm_version cur_version;
+ int cur_lmqc; /* last member query count in ds */
/* this value (positive, default 10ms) defines our "timing tolerance":
* - added to deadlines for expiring joins
diff --git a/pimd/pim_addr.h b/pimd/pim_addr.h
index 2f2ff24675..7852d1788a 100644
--- a/pimd/pim_addr.h
+++ b/pimd/pim_addr.h
@@ -31,6 +31,7 @@ typedef struct in_addr pim_addr;
#define PIM_ADDRSTRLEN INET_ADDRSTRLEN
#define PIM_AF AF_INET
#define PIM_AFI AFI_IP
+#define PIM_PROTO_REG IPPROTO_RAW
#define PIM_IPADDR IPADDR_V4
#define ipaddr_pim ipaddr_v4
#define PIM_MAX_BITLEN IPV4_MAX_BITLEN
@@ -38,6 +39,7 @@ typedef struct in_addr pim_addr;
#define PIM_AF_DBG "pim"
#define PIM_MROUTE_DBG "mroute"
#define PIMREG "pimreg"
+#define GM "IGMP"
#define PIM_ADDR_FUNCNAME(name) ipv4_##name
@@ -57,6 +59,7 @@ typedef struct in6_addr pim_addr;
#define PIM_ADDRSTRLEN INET6_ADDRSTRLEN
#define PIM_AF AF_INET6
#define PIM_AFI AFI_IP6
+#define PIM_PROTO_REG IPPROTO_PIM
#define PIM_IPADDR IPADDR_V6
#define ipaddr_pim ipaddr_v6
#define PIM_MAX_BITLEN IPV6_MAX_BITLEN
@@ -64,6 +67,7 @@ typedef struct in6_addr pim_addr;
#define PIM_AF_DBG "pimv6"
#define PIM_MROUTE_DBG "mroute6"
#define PIMREG "pim6reg"
+#define GM "MLD"
#define PIM_ADDR_FUNCNAME(name) ipv6_##name
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index c2453efa06..f0b6037db9 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -1129,11 +1129,11 @@ static void igmp_show_groups(struct pim_instance *pim, struct vty *vty, bool uj)
if (uj) {
json = json_object_new_object();
- json_object_int_add(json, "totalGroups", pim->igmp_group_count);
+ json_object_int_add(json, "totalGroups", pim->gm_group_count);
json_object_int_add(json, "watermarkLimit",
pim->gm_watermark_limit);
} else {
- vty_out(vty, "Total IGMP groups: %u\n", pim->igmp_group_count);
+ vty_out(vty, "Total IGMP groups: %u\n", pim->gm_group_count);
vty_out(vty, "Watermark warn limit(%s): %u\n",
pim->gm_watermark_limit ? "Set" : "Not Set",
pim->gm_watermark_limit);
diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c
index 1d3f5f430a..70c1544717 100644
--- a/pimd/pim_cmd_common.c
+++ b/pimd/pim_cmd_common.c
@@ -888,6 +888,8 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json)
{
struct pim_upstream *up;
time_t now = pim_time_monotonic_sec();
+ struct ttable *tt = NULL;
+ char *table = NULL;
json_object *json_group = NULL;
json_object *json_row = NULL;
@@ -895,8 +897,15 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json)
if (!json) {
vty_out(vty, "\n");
- vty_out(vty,
- "Source Group RpfIface RpfAddress RibNextHop Metric Pref\n");
+
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(
+ tt,
+ "Source|Group|RpfIface|RpfAddress|RibNextHop|Metric|Pref");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
}
frr_each (rb_pim_upstream, &pim->upstream_head, up) {
@@ -944,8 +953,8 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json)
json_object_object_add(json_group, src_str, json_row);
} else {
- vty_out(vty,
- "%-15pPAs %-15pPAs %-16s %-15pPA %-15pPAs %6d %4d\n",
+ ttable_add_row(
+ tt, "%pPAs|%pPAs|%s|%pPA|%pPAs|%d|%d",
&up->sg.src, &up->sg.grp, rpf_ifname,
&rpf->rpf_addr,
&rpf->source_nexthop.mrib_nexthop_addr,
@@ -953,14 +962,27 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json)
rpf->source_nexthop.mrib_metric_preference);
}
}
+ /* Dump the generated table. */
+ if (!json) {
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
}
void pim_show_neighbors_secondary(struct pim_instance *pim, struct vty *vty)
{
struct interface *ifp;
+ struct ttable *tt = NULL;
+ char *table = NULL;
- vty_out(vty,
- "Interface Address Neighbor Secondary \n");
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "Interface|Address|Neighbor|Secondary");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
FOR_ALL_INTERFACES (pim->vrf, ifp) {
struct pim_interface *pim_ifp;
@@ -988,12 +1010,16 @@ void pim_show_neighbors_secondary(struct pim_instance *pim, struct vty *vty)
for (ALL_LIST_ELEMENTS_RO(neigh->prefix_list,
prefix_node, p))
- vty_out(vty,
- "%-16s %-15pPAs %-15pPAs %-15pFX\n",
- ifp->name, &ifaddr, &neigh->source_addr,
- p);
+ ttable_add_row(tt, "%s|%pPAs|%pPAs|%pFX",
+ ifp->name, &ifaddr,
+ &neigh->source_addr, p);
}
}
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
}
void pim_show_state(struct pim_instance *pim, struct vty *vty,
@@ -1012,7 +1038,7 @@ void pim_show_state(struct pim_instance *pim, struct vty *vty,
if (!json) {
vty_out(vty,
- "Codes: J -> Pim Join, I -> IGMP Report, S -> Source, * -> Inherited from (*,G), V -> VxLAN, M -> Muted");
+ "Codes: J -> Pim Join, I -> " GM " Report, S -> Source, * -> Inherited from (*,G), V -> VxLAN, M -> Muted");
vty_out(vty,
"\nActive Source Group RPT IIF OIL\n");
}
@@ -1317,15 +1343,24 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
pim_sgaddr *sg, json_object *json)
{
struct pim_upstream *up;
+ struct ttable *tt = NULL;
+ char *table = NULL;
time_t now;
json_object *json_group = NULL;
json_object *json_row = NULL;
now = pim_time_monotonic_sec();
- if (!json)
- vty_out(vty,
- "Iif Source Group State Uptime JoinTimer RSTimer KATimer RefCnt\n");
+ if (!json) {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(
+ tt,
+ "Iif|Source|Group|State|Uptime|JoinTimer|RSTimer|KATimer|RefCnt");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ }
frr_each (rb_pim_upstream, &pim->upstream_head, up) {
char uptime[10];
@@ -1446,8 +1481,8 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
json_object_int_add(json_row, "sptBit", up->sptbit);
json_object_object_add(json_group, src_str, json_row);
} else {
- vty_out(vty,
- "%-16s%-15pPAs %-15pPAs %-11s %-8s %-9s %-9s %-9s %6d\n",
+ ttable_add_row(tt,
+ "%s|%pPAs|%pPAs|%s|%s|%s|%s|%s|%d",
up->rpf.source_nexthop.interface
? up->rpf.source_nexthop.interface->name
: "Unknown",
@@ -1455,12 +1490,20 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
join_timer, rs_timer, ka_timer, up->ref_count);
}
}
+ /* Dump the generated table. */
+ if (!json) {
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
}
static void pim_show_join_desired_helper(struct pim_instance *pim,
struct vty *vty,
struct pim_upstream *up,
- json_object *json, bool uj)
+ json_object *json, bool uj,
+ struct ttable *tt)
{
json_object *json_group = NULL;
json_object *json_row = NULL;
@@ -1491,45 +1534,68 @@ static void pim_show_join_desired_helper(struct pim_instance *pim,
json_object_object_add(json_group, src_str, json_row);
} else {
- vty_out(vty, "%-15pPAs %-15pPAs %-6s\n", &up->sg.src,
- &up->sg.grp,
- pim_upstream_evaluate_join_desired(pim, up) ? "yes"
- : "no");
+ ttable_add_row(tt, "%pPAs|%pPAs|%s", &up->sg.src, &up->sg.grp,
+ pim_upstream_evaluate_join_desired(pim, up)
+ ? "yes"
+ : "no");
}
}
void pim_show_join_desired(struct pim_instance *pim, struct vty *vty, bool uj)
{
struct pim_upstream *up;
+ struct ttable *tt = NULL;
+ char *table = NULL;
json_object *json = NULL;
if (uj)
json = json_object_new_object();
- else
- vty_out(vty, "Source Group EvalJD\n");
+ else {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "Source|Group|EvalJD");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ }
frr_each (rb_pim_upstream, &pim->upstream_head, up) {
/* scan all interfaces */
- pim_show_join_desired_helper(pim, vty, up, json, uj);
+ pim_show_join_desired_helper(pim, vty, up, json, uj, tt);
}
if (uj)
vty_json(vty, json);
+ else {
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
}
void pim_show_upstream_rpf(struct pim_instance *pim, struct vty *vty, bool uj)
{
struct pim_upstream *up;
+ struct ttable *tt = NULL;
+ char *table = NULL;
json_object *json = NULL;
json_object *json_group = NULL;
json_object *json_row = NULL;
if (uj)
json = json_object_new_object();
- else
- vty_out(vty,
- "Source Group RpfIface RibNextHop RpfAddress \n");
+ else {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt,
+ "Source|Group|RpfIface|RibNextHop|RpfAddress");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ }
frr_each (rb_pim_upstream, &pim->upstream_head, up) {
struct pim_rpf *rpf;
@@ -1571,16 +1637,22 @@ void pim_show_upstream_rpf(struct pim_instance *pim, struct vty *vty, bool uj)
&rpf->rpf_addr);
json_object_object_add(json_group, src_str, json_row);
} else {
- vty_out(vty,
- "%-15pPAs %-15pPAs %-16s %-15pPA %-15pPA\n",
- &up->sg.src, &up->sg.grp, rpf_ifname,
- &rpf->source_nexthop.mrib_nexthop_addr,
- &rpf->rpf_addr);
+ ttable_add_row(tt, "%pPAs|%pPAs|%s|%pPA|%pPA",
+ &up->sg.src, &up->sg.grp, rpf_ifname,
+ &rpf->source_nexthop.mrib_nexthop_addr,
+ &rpf->rpf_addr);
}
}
if (uj)
vty_json(vty, json);
+ else {
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
}
static void pim_show_join_helper(struct vty *vty, struct pim_interface *pim_ifp,
@@ -1755,13 +1827,14 @@ void pim_show_join(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
}
}
-static void pim_show_jp_agg_helper(struct vty *vty, struct interface *ifp,
+static void pim_show_jp_agg_helper(struct interface *ifp,
struct pim_neighbor *neigh,
- struct pim_upstream *up, int is_join)
+ struct pim_upstream *up, int is_join,
+ struct ttable *tt)
{
- vty_out(vty, "%-16s %-15pPAs %-15pPAs %-15pPAs %5s\n", ifp->name,
- &neigh->source_addr, &up->sg.src, &up->sg.grp,
- is_join ? "J" : "P");
+ ttable_add_row(tt, "%s|%pPAs|%pPAs|%pPAs|%s", ifp->name,
+ &neigh->source_addr, &up->sg.src, &up->sg.grp,
+ is_join ? "J" : "P");
}
int pim_show_jp_agg_list_cmd_helper(const char *vrf, struct vty *vty)
@@ -1797,9 +1870,15 @@ void pim_show_jp_agg_list(struct pim_instance *pim, struct vty *vty)
struct pim_jp_agg_group *jag;
struct listnode *js_node;
struct pim_jp_sources *js;
+ struct ttable *tt;
+ char *table;
- vty_out(vty,
- "Interface RPF Nbr Source Group State\n");
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "Interface|RPF Nbr|Source|Group|State");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
FOR_ALL_INTERFACES (pim->vrf, ifp) {
pim_ifp = ifp->info;
@@ -1812,13 +1891,19 @@ void pim_show_jp_agg_list(struct pim_instance *pim, struct vty *vty)
jag_node, jag)) {
for (ALL_LIST_ELEMENTS_RO(jag->sources, js_node,
js)) {
- pim_show_jp_agg_helper(vty, ifp, neigh,
+ pim_show_jp_agg_helper(ifp, neigh,
js->up,
- js->is_join);
+ js->is_join, tt);
}
}
}
}
+
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
}
int pim_show_membership_cmd_helper(const char *vrf, struct vty *vty, bool uj)
@@ -1953,10 +2038,10 @@ void pim_show_membership(struct pim_instance *pim, struct vty *vty, bool uj)
}
}
-static void pim_show_channel_helper(struct pim_instance *pim, struct vty *vty,
+static void pim_show_channel_helper(struct pim_instance *pim,
struct pim_interface *pim_ifp,
struct pim_ifchannel *ch, json_object *json,
- bool uj)
+ bool uj, struct ttable *tt)
{
struct pim_upstream *up = ch->upstream;
json_object *json_group = NULL;
@@ -1999,17 +2084,17 @@ static void pim_show_channel_helper(struct pim_instance *pim, struct vty *vty,
&up->sg.src);
} else {
- vty_out(vty,
- "%-16s %-15pPAs %-15pPAs %-10s %-5s %-10s %-11s %-6s\n",
- ch->interface->name, &up->sg.src, &up->sg.grp,
- pim_macro_ch_lost_assert(ch) ? "yes" : "no",
- pim_macro_chisin_joins(ch) ? "yes" : "no",
- pim_macro_chisin_pim_include(ch) ? "yes" : "no",
- PIM_UPSTREAM_FLAG_TEST_DR_JOIN_DESIRED(up->flags)
- ? "yes"
- : "no",
- pim_upstream_evaluate_join_desired(pim, up) ? "yes"
- : "no");
+ ttable_add_row(tt, "%s|%pPAs|%pPAs|%s|%s|%s|%s|%s",
+ ch->interface->name, &up->sg.src, &up->sg.grp,
+ pim_macro_ch_lost_assert(ch) ? "yes" : "no",
+ pim_macro_chisin_joins(ch) ? "yes" : "no",
+ pim_macro_chisin_pim_include(ch) ? "yes" : "no",
+ PIM_UPSTREAM_FLAG_TEST_DR_JOIN_DESIRED(up->flags)
+ ? "yes"
+ : "no",
+ pim_upstream_evaluate_join_desired(pim, up)
+ ? "yes"
+ : "no");
}
}
@@ -2018,14 +2103,22 @@ void pim_show_channel(struct pim_instance *pim, struct vty *vty, bool uj)
struct pim_interface *pim_ifp;
struct pim_ifchannel *ch;
struct interface *ifp;
-
+ struct ttable *tt = NULL;
json_object *json = NULL;
+ char *table = NULL;
if (uj)
json = json_object_new_object();
- else
- vty_out(vty,
- "Interface Source Group LostAssert Joins PimInclude JoinDesired EvalJD\n");
+ else {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(
+ tt,
+ "Interface|Source|Group|LostAssert|Joins|PimInclude|JoinDesired|EvalJD");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ }
/* scan per-interface (S,G) state */
FOR_ALL_INTERFACES (pim->vrf, ifp) {
@@ -2033,16 +2126,21 @@ void pim_show_channel(struct pim_instance *pim, struct vty *vty, bool uj)
if (!pim_ifp)
continue;
-
RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
/* scan all interfaces */
- pim_show_channel_helper(pim, vty, pim_ifp, ch, json,
- uj);
+ pim_show_channel_helper(pim, pim_ifp, ch, json, uj, tt);
}
}
if (uj)
vty_json(vty, json);
+ else {
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
}
int pim_show_channel_cmd_helper(const char *vrf, struct vty *vty, bool uj)
@@ -2218,6 +2316,7 @@ void pim_show_interfaces(struct pim_instance *pim, struct vty *vty, bool mlag,
address, neighbors, pimdr, firsthpr,
pimifchnl);
}
+ json_object_free(json);
/* Dump the generated table. */
table = ttable_dump(tt, "\n");
@@ -3817,7 +3916,6 @@ void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
static void show_mroute_count_per_channel_oil(struct channel_oil *c_oil,
json_object *json,
- struct vty *vty,
struct ttable *tt)
{
json_object *json_group = NULL;
@@ -3885,10 +3983,10 @@ void show_mroute_count(struct pim_instance *pim, struct vty *vty,
/* Print PIM and IGMP route counts */
frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil)
- show_mroute_count_per_channel_oil(c_oil, json, vty, tt);
+ show_mroute_count_per_channel_oil(c_oil, json, tt);
for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, sr))
- show_mroute_count_per_channel_oil(&sr->c_oil, json, vty, tt);
+ show_mroute_count_per_channel_oil(&sr->c_oil, json, tt);
/* Dump the generated table. */
if (!json) {
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index 0fb5e8c6d9..e03e5a2630 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -115,7 +115,7 @@ static int pim_sec_addr_comp(const void *p1, const void *p2)
return 0;
}
-struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,
+struct pim_interface *pim_if_new(struct interface *ifp, bool gm, bool pim,
bool ispimreg, bool is_vxlan_term)
{
struct pim_interface *pim_ifp;
@@ -154,9 +154,7 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,
pim_ifp->pim_enable = pim;
pim_ifp->pim_passive_enable = false;
-#if PIM_IPV == 4
- pim_ifp->gm_enable = igmp;
-#endif
+ pim_ifp->gm_enable = gm;
pim_ifp->gm_join_list = NULL;
pim_ifp->pim_neighbor_list = NULL;
@@ -810,7 +808,7 @@ void pim_if_addr_add_all(struct interface *ifp)
ifp->name);
}
/*
- * PIM or IGMP is enabled on interface, and there is at least one
+ * PIM or IGMP/MLD is enabled on interface, and there is at least one
* address assigned, then try to create a vif_index.
*/
if (pim_ifp->mroute_vif_index < 0) {
diff --git a/pimd/pim_igmp.c b/pimd/pim_igmp.c
index 6ffeeb9657..fdc56fd3f3 100644
--- a/pimd/pim_igmp.c
+++ b/pimd/pim_igmp.c
@@ -1008,12 +1008,11 @@ static void igmp_group_count_incr(struct pim_interface *pim_ifp)
{
uint32_t group_count = listcount(pim_ifp->gm_group_list);
- ++pim_ifp->pim->igmp_group_count;
- if (pim_ifp->pim->igmp_group_count ==
- pim_ifp->pim->gm_watermark_limit) {
+ ++pim_ifp->pim->gm_group_count;
+ if (pim_ifp->pim->gm_group_count == pim_ifp->pim->gm_watermark_limit) {
zlog_warn(
"IGMP group count reached watermark limit: %u(vrf: %s)",
- pim_ifp->pim->igmp_group_count,
+ pim_ifp->pim->gm_group_count,
VRF_LOGNAME(pim_ifp->pim->vrf));
}
@@ -1023,13 +1022,13 @@ static void igmp_group_count_incr(struct pim_interface *pim_ifp)
static void igmp_group_count_decr(struct pim_interface *pim_ifp)
{
- if (pim_ifp->pim->igmp_group_count == 0) {
+ if (pim_ifp->pim->gm_group_count == 0) {
zlog_warn("Cannot decrement igmp group count below 0(vrf: %s)",
VRF_LOGNAME(pim_ifp->pim->vrf));
return;
}
- --pim_ifp->pim->igmp_group_count;
+ --pim_ifp->pim->gm_group_count;
}
void igmp_group_delete(struct gm_group *group)
diff --git a/pimd/pim_instance.h b/pimd/pim_instance.h
index 0da881557c..57bc74efb4 100644
--- a/pimd/pim_instance.h
+++ b/pimd/pim_instance.h
@@ -173,7 +173,7 @@ struct pim_instance {
int gm_socket;
struct thread *t_gm_recv;
- unsigned int igmp_group_count;
+ unsigned int gm_group_count;
unsigned int gm_watermark_limit;
unsigned int keep_alive_time;
unsigned int rp_keep_alive_time;
diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c
index 408e86b698..aaad56e543 100644
--- a/pimd/pim_nb_config.c
+++ b/pimd/pim_nb_config.c
@@ -348,8 +348,7 @@ static bool is_pim_interface(const struct lyd_node *dnode)
return false;
}
-#if PIM_IPV == 4
-static int pim_cmd_igmp_start(struct interface *ifp)
+static int pim_cmd_gm_start(struct interface *ifp)
{
struct pim_interface *pim_ifp;
uint8_t need_startup = 0;
@@ -377,7 +376,6 @@ static int pim_cmd_igmp_start(struct interface *ifp)
return NB_OK;
}
-#endif /* PIM_IPV == 4 */
/*
* CLI reconfiguration affects the interface level (struct pim_interface).
@@ -456,14 +454,17 @@ static void change_query_interval(struct pim_interface *pim_ifp,
}
#endif
-#if PIM_IPV == 4
-static void change_query_max_response_time(struct pim_interface *pim_ifp,
- int query_max_response_time_dsec)
+static void change_query_max_response_time(struct interface *ifp,
+ int query_max_response_time_dsec)
{
+#if PIM_IPV == 4
struct listnode *sock_node;
struct gm_sock *igmp;
struct listnode *grp_node;
struct gm_group *grp;
+#endif
+
+ struct pim_interface *pim_ifp = ifp->info;
if (pim_ifp->gm_query_max_response_time_dsec ==
query_max_response_time_dsec)
@@ -471,6 +472,9 @@ static void change_query_max_response_time(struct pim_interface *pim_ifp,
pim_ifp->gm_query_max_response_time_dsec = query_max_response_time_dsec;
+#if PIM_IPV == 6
+ gm_ifp_update(ifp);
+#else
/*
* Below we modify socket/group/source timers in order to quickly
* reflect the change. Otherwise, those timers would args->eventually
@@ -503,8 +507,8 @@ static void change_query_max_response_time(struct pim_interface *pim_ifp,
igmp_source_reset_gmi(grp, src);
}
}
+#endif /* PIM_IPV == 4 */
}
-#endif
int routing_control_plane_protocols_name_validate(
struct nb_cb_create_args *args)
@@ -2584,7 +2588,6 @@ int lib_interface_gmp_address_family_destroy(struct nb_cb_destroy_args *args)
int lib_interface_gmp_address_family_enable_modify(
struct nb_cb_modify_args *args)
{
-#if PIM_IPV == 4
struct interface *ifp;
bool gm_enable;
struct pim_interface *pim_ifp;
@@ -2600,9 +2603,10 @@ int lib_interface_gmp_address_family_enable_modify(
/* Limiting mcast interfaces to number of VIFs */
if (mcast_if_count == MAXVIFS) {
ifp_name = yang_dnode_get_string(if_dnode, "name");
- snprintf(args->errmsg, args->errmsg_len,
- "Max multicast interfaces(%d) Reached. Could not enable IGMP on interface %s",
- MAXVIFS, ifp_name);
+ snprintf(
+ args->errmsg, args->errmsg_len,
+ "Max multicast interfaces(%d) Reached. Could not enable %s on interface %s",
+ MAXVIFS, GM, ifp_name);
return NB_ERR_VALIDATION;
}
break;
@@ -2614,7 +2618,7 @@ int lib_interface_gmp_address_family_enable_modify(
gm_enable = yang_dnode_get_bool(args->dnode, NULL);
if (gm_enable)
- return pim_cmd_igmp_start(ifp);
+ return pim_cmd_gm_start(ifp);
else {
pim_ifp = ifp->info;
@@ -2626,15 +2630,16 @@ int lib_interface_gmp_address_family_enable_modify(
pim_if_membership_clear(ifp);
+#if PIM_IPV == 4
pim_if_addr_del_all_igmp(ifp);
+#else
+ gm_ifp_teardown(ifp);
+#endif
if (!pim_ifp->pim_enable)
pim_if_delete(ifp);
}
}
-#else
- /* TBD Depends on MLD data structure changes */
-#endif /* PIM_IPV == 4 */
return NB_OK;
}
@@ -2798,7 +2803,6 @@ int lib_interface_gmp_address_family_query_interval_modify(
int lib_interface_gmp_address_family_query_max_response_time_modify(
struct nb_cb_modify_args *args)
{
-#if PIM_IPV == 4
struct interface *ifp;
int query_max_response_time_dsec;
@@ -2811,13 +2815,9 @@ int lib_interface_gmp_address_family_query_max_response_time_modify(
ifp = nb_running_get_entry(args->dnode, NULL, true);
query_max_response_time_dsec =
yang_dnode_get_uint16(args->dnode, NULL);
- change_query_max_response_time(ifp->info,
- query_max_response_time_dsec);
+ change_query_max_response_time(ifp,
+ query_max_response_time_dsec);
}
-#else
- /* TBD Depends on MLD data structure changes */
-#endif
-
return NB_OK;
}
@@ -2828,7 +2828,6 @@ int lib_interface_gmp_address_family_query_max_response_time_modify(
int lib_interface_gmp_address_family_last_member_query_interval_modify(
struct nb_cb_modify_args *args)
{
-#if PIM_IPV == 4
struct interface *ifp;
struct pim_interface *pim_ifp;
int last_member_query_interval;
@@ -2848,9 +2847,6 @@ int lib_interface_gmp_address_family_last_member_query_interval_modify(
break;
}
-#else
- /* TBD Depends on MLD data structure changes */
-#endif
return NB_OK;
}
@@ -2861,7 +2857,6 @@ int lib_interface_gmp_address_family_last_member_query_interval_modify(
int lib_interface_gmp_address_family_robustness_variable_modify(
struct nb_cb_modify_args *args)
{
-#if PIM_IPV == 4
struct interface *ifp;
struct pim_interface *pim_ifp;
int last_member_query_count;
@@ -2880,9 +2875,6 @@ int lib_interface_gmp_address_family_robustness_variable_modify(
break;
}
-#else
- /* TBD Depends on MLD data structure changes */
-#endif
return NB_OK;
}
diff --git a/pimd/pim_neighbor.c b/pimd/pim_neighbor.c
index 6d6dbb6465..7726ac00b0 100644
--- a/pimd/pim_neighbor.c
+++ b/pimd/pim_neighbor.c
@@ -441,15 +441,6 @@ struct pim_neighbor *pim_neighbor_find(struct interface *ifp,
return NULL;
}
-struct pim_neighbor *pim_neighbor_find_prefix(struct interface *ifp,
- const struct prefix *src_prefix)
-{
- pim_addr addr;
-
- addr = pim_addr_from_prefix(src_prefix);
- return pim_neighbor_find(ifp, addr);
-}
-
/*
* Find the *one* interface out
* this interface. If more than
diff --git a/pimd/pim_neighbor.h b/pimd/pim_neighbor.h
index 2673d22480..a2a2df9e04 100644
--- a/pimd/pim_neighbor.h
+++ b/pimd/pim_neighbor.h
@@ -52,8 +52,6 @@ void pim_neighbor_timer_reset(struct pim_neighbor *neigh, uint16_t holdtime);
void pim_neighbor_free(struct pim_neighbor *neigh);
struct pim_neighbor *pim_neighbor_find(struct interface *ifp,
pim_addr source_addr);
-struct pim_neighbor *pim_neighbor_find_prefix(struct interface *ifp,
- const struct prefix *src_prefix);
struct pim_neighbor *pim_neighbor_find_by_secondary(struct interface *ifp,
struct prefix *src);
struct pim_neighbor *pim_neighbor_find_if(struct interface *ifp);
diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c
index 783c9b97e7..1dce6b3562 100644
--- a/pimd/pim_rp.c
+++ b/pimd/pim_rp.c
@@ -51,6 +51,7 @@
#include "pim_bsm.h"
#include "pim_util.h"
#include "pim_ssm.h"
+#include "termtable.h"
/* Cleanup pim->rpf_hash each node data */
void pim_rp_list_hash_clean(void *data)
@@ -1166,14 +1167,25 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
struct rp_info *rp_info;
struct rp_info *prev_rp_info = NULL;
struct listnode *node;
+ struct ttable *tt = NULL;
+ char *table = NULL;
char source[7];
+ char grp[INET6_ADDRSTRLEN];
json_object *json_rp_rows = NULL;
json_object *json_row = NULL;
- if (!json)
- vty_out(vty,
- "RP address group/prefix-list OIF I am RP Source Group-Type\n");
+ if (!json) {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(
+ tt,
+ "RP address|group/prefix-list|OIF|I am RP|Source|Group-Type");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ }
+
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
continue;
@@ -1243,32 +1255,31 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
json_object_array_add(json_rp_rows, json_row);
} else {
- vty_out(vty, "%-15pPA ", &rp_info->rp.rpf_addr);
-
- if (rp_info->plist)
- vty_out(vty, "%-18s ", rp_info->plist);
- else
- vty_out(vty, "%-18pFX ", &rp_info->group);
-
- if (rp_info->rp.source_nexthop.interface)
- vty_out(vty, "%-16s ",
- rp_info->rp.source_nexthop
- .interface->name);
- else
- vty_out(vty, "%-16s ", "(Unknown)");
-
- if (rp_info->i_am_rp)
- vty_out(vty, "yes");
- else
- vty_out(vty, "no");
-
- vty_out(vty, "%14s", source);
- vty_out(vty, "%6s\n", group_type);
+ prefix2str(&rp_info->group, grp, sizeof(grp));
+ ttable_add_row(tt, "%pPA|%s|%s|%s|%s|%s",
+ &rp_info->rp.rpf_addr,
+ rp_info->plist
+ ? rp_info->plist
+ : grp,
+ rp_info->rp.source_nexthop.interface
+ ? rp_info->rp.source_nexthop
+ .interface->name
+ : "Unknown",
+ rp_info->i_am_rp
+ ? "yes"
+ : "no",
+ source, group_type);
}
prev_rp_info = rp_info;
}
- if (json) {
+ /* Dump the generated table. */
+ if (!json) {
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ } else {
if (prev_rp_info && json_rp_rows)
json_object_object_addf(json, json_rp_rows, "%pPA",
&prev_rp_info->rp.rpf_addr);
diff --git a/pimd/pim_sock.c b/pimd/pim_sock.c
index b5a055c6aa..4b91bf07d9 100644
--- a/pimd/pim_sock.c
+++ b/pimd/pim_sock.c
@@ -185,7 +185,7 @@ int pim_reg_sock(void)
long flags;
frr_with_privs (&pimd_privs) {
- fd = socket(PIM_AF, SOCK_RAW, IPPROTO_RAW);
+ fd = socket(PIM_AF, SOCK_RAW, PIM_PROTO_REG);
}
if (fd < 0) {
diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c
index 3d5d68b1f4..c18652f72e 100644
--- a/pimd/pim_vty.c
+++ b/pimd/pim_vty.c
@@ -370,11 +370,37 @@ static int gm_config_write(struct vty *vty, int writes,
static int gm_config_write(struct vty *vty, int writes,
struct pim_interface *pim_ifp)
{
+ /* IF ipv6 mld */
+ if (pim_ifp->gm_enable) {
+ vty_out(vty, " ipv6 mld\n");
+ ++writes;
+ }
+
if (pim_ifp->mld_version != MLD_DEFAULT_VERSION)
vty_out(vty, " ipv6 mld version %d\n", pim_ifp->mld_version);
+
+ /* IF ipv6 mld query-max-response-time */
+ if (pim_ifp->gm_query_max_response_time_dsec !=
+ IGMP_QUERY_MAX_RESPONSE_TIME_DSEC)
+ vty_out(vty, " ipv6 mld query-max-response-time %d\n",
+ pim_ifp->gm_query_max_response_time_dsec);
+
if (pim_ifp->gm_default_query_interval != IGMP_GENERAL_QUERY_INTERVAL)
vty_out(vty, " ipv6 mld query-interval %d\n",
pim_ifp->gm_default_query_interval);
+
+	/* IF ipv6 mld last-member-query-count */
+ if (pim_ifp->gm_last_member_query_count !=
+ IGMP_DEFAULT_ROBUSTNESS_VARIABLE)
+ vty_out(vty, " ipv6 mld last-member-query-count %d\n",
+ pim_ifp->gm_last_member_query_count);
+
+	/* IF ipv6 mld last-member-query-interval */
+ if (pim_ifp->gm_specific_query_max_response_time_dsec !=
+ IGMP_SPECIFIC_QUERY_MAX_RESPONSE_TIME_DSEC)
+ vty_out(vty, " ipv6 mld last-member-query-interval %d\n",
+ pim_ifp->gm_specific_query_max_response_time_dsec);
+
return 0;
}
#endif
diff --git a/ripd/ripd.c b/ripd/ripd.c
index 1a0db4a7d9..8a321d9a91 100644
--- a/ripd/ripd.c
+++ b/ripd/ripd.c
@@ -996,6 +996,7 @@ static size_t rip_auth_md5_ah_write(struct stream *s, struct rip_interface *ri,
struct key *key)
{
size_t doff = 0;
+ static uint32_t seq = 0;
assert(s && ri && ri->auth_type == RIP_AUTH_MD5);
@@ -1028,7 +1029,7 @@ static size_t rip_auth_md5_ah_write(struct stream *s, struct rip_interface *ri,
/* RFC2080: The value used in the sequence number is
arbitrary, but two suggestions are the time of the
message's creation or a simple message counter. */
- stream_putl(s, time(NULL));
+ stream_putl(s, ++seq);
/* Reserved field must be zero. */
stream_putl(s, 0);
diff --git a/sharpd/sharp_vty.c b/sharpd/sharp_vty.c
index 2281b3ce26..3853df7cb0 100644
--- a/sharpd/sharp_vty.c
+++ b/sharpd/sharp_vty.c
@@ -234,6 +234,8 @@ DEFPY (install_routes,
memset(&prefix, 0, sizeof(prefix));
memset(&sg.r.orig_prefix, 0, sizeof(sg.r.orig_prefix));
+ nexthop_del_srv6_seg6local(&sg.r.nhop);
+ nexthop_del_srv6_seg6(&sg.r.nhop);
memset(&sg.r.nhop, 0, sizeof(sg.r.nhop));
memset(&sg.r.nhop_group, 0, sizeof(sg.r.nhop_group));
memset(&sg.r.backup_nhop, 0, sizeof(sg.r.nhop));
@@ -376,6 +378,8 @@ DEFPY (install_seg6_routes,
memset(&prefix, 0, sizeof(prefix));
memset(&sg.r.orig_prefix, 0, sizeof(sg.r.orig_prefix));
+ nexthop_del_srv6_seg6local(&sg.r.nhop);
+ nexthop_del_srv6_seg6(&sg.r.nhop);
memset(&sg.r.nhop, 0, sizeof(sg.r.nhop));
memset(&sg.r.nhop_group, 0, sizeof(sg.r.nhop_group));
memset(&sg.r.backup_nhop, 0, sizeof(sg.r.nhop));
@@ -467,6 +471,8 @@ DEFPY (install_seg6local_routes,
sg.r.repeat = 0;
memset(&sg.r.orig_prefix, 0, sizeof(sg.r.orig_prefix));
+ nexthop_del_srv6_seg6local(&sg.r.nhop);
+ nexthop_del_srv6_seg6(&sg.r.nhop);
memset(&sg.r.nhop, 0, sizeof(sg.r.nhop));
memset(&sg.r.nhop_group, 0, sizeof(sg.r.nhop_group));
memset(&sg.r.backup_nhop, 0, sizeof(sg.r.nhop));
@@ -924,6 +930,11 @@ DEFPY (import_te,
return CMD_SUCCESS;
}
+static void sharp_srv6_locator_chunk_free(struct prefix_ipv6 *chunk)
+{
+ prefix_ipv6_free((struct prefix_ipv6 **)&chunk);
+}
+
DEFPY (sharp_srv6_manager_get_locator_chunk,
sharp_srv6_manager_get_locator_chunk_cmd,
"sharp srv6-manager get-locator-chunk NAME$locator_name",
@@ -947,6 +958,8 @@ DEFPY (sharp_srv6_manager_get_locator_chunk,
loc = XCALLOC(MTYPE_SRV6_LOCATOR,
sizeof(struct sharp_srv6_locator));
loc->chunks = list_new();
+ loc->chunks->del =
+ (void (*)(void *))sharp_srv6_locator_chunk_free;
snprintf(loc->name, SRV6_LOCNAME_SIZE, "%s", locator_name);
listnode_add(sg.srv6_locators, loc);
}
@@ -1096,6 +1109,7 @@ DEFPY (sharp_srv6_manager_release_locator_chunk,
list_delete_all_node(loc->chunks);
list_delete(&loc->chunks);
listnode_delete(sg.srv6_locators, loc);
+ XFREE(MTYPE_SRV6_LOCATOR, loc);
break;
}
}
diff --git a/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py b/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py
index c8cdc7ec5c..4d7f436eac 100644
--- a/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py
+++ b/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py
@@ -22,6 +22,7 @@
Following tests are covered.
1. Verify default-originate route with default static and network command
2. Verify default-originate route with aggregate summary command
+3. Verify default-originate behaviour in ecmp
"""
import os
import sys
@@ -48,7 +49,10 @@ from lib.bgp import (
from lib.common_config import (
verify_fib_routes,
step,
+ create_prefix_lists,
run_frr_cmd,
+ create_route_maps,
+ shutdown_bringup_interface,
get_frr_ipv6_linklocal,
start_topology,
apply_raw_config,
@@ -296,6 +300,78 @@ def verify_the_uptime(time_stamp_before, time_stamp_after, incremented=None):
return True
+def get_best_path_route_in_FIB(tgen, topo, dut, network):
+ """
+ API to verify the best route in FIB and return the ipv4 and ipv6 nexthop for the given route
+ command
+ =======
+ show ip route
+ show ipv6 route
+ params
+ ======
+ dut : device under test :
+ network ; route (ip) to which the best route to be retrieved
+ Returns
+ ========
+ on success : return dict with next hops for the best hop
+ on failure : return error message with boolean False
+ """
+ is_ipv4_best_path_found = False
+ is_ipv6_best_path_found = False
+ rnode = tgen.routers()[dut]
+ ipv4_show_bgp_json = run_frr_cmd(rnode, "sh ip bgp json ", isjson=True)
+ ipv6_show_bgp_json = run_frr_cmd(
+ rnode, "sh ip bgp ipv6 unicast json ", isjson=True
+ )
+ output_dict = {"ipv4": None, "ipv6": None}
+ ipv4_nxt_hop_count = len(ipv4_show_bgp_json["routes"][network["ipv4"]])
+ for index in range(ipv4_nxt_hop_count):
+ if "bestpath" in ipv4_show_bgp_json["routes"][network["ipv4"]][index].keys():
+ best_path_ip = ipv4_show_bgp_json["routes"][network["ipv4"]][index][
+ "nexthops"
+ ][0]["ip"]
+ output_dict["ipv4"] = best_path_ip
+ logger.info(
+ "[DUT [{}]] Best path for the route {} is {} ".format(
+ dut, network["ipv4"], best_path_ip
+ )
+ )
+ is_ipv4_best_path_found = True
+ else:
+ logger.error("ERROR....! No Best Path Found in BGP RIB.... FAILED")
+
+ ipv6_nxt_hop_count = len(ipv6_show_bgp_json["routes"][network["ipv6"]])
+ for index in range(ipv6_nxt_hop_count):
+ if "bestpath" in ipv6_show_bgp_json["routes"][network["ipv6"]][index].keys():
+ ip_add_count = len(
+ ipv6_show_bgp_json["routes"][network["ipv6"]][index]["nexthops"]
+ )
+ for i_index in range(ip_add_count):
+ if (
+ "global"
+ in ipv6_show_bgp_json["routes"][network["ipv6"]][index]["nexthops"][
+ i_index
+ ]["scope"]
+ ):
+ best_path_ip = ipv6_show_bgp_json["routes"][network["ipv6"]][index][
+ "nexthops"
+ ][i_index]["ip"]
+ output_dict["ipv6"] = best_path_ip
+ logger.info(
+ "[DUT [{}]] Best path for the route {} is {} ".format(
+ dut, network["ipv6"], best_path_ip
+ )
+ )
+
+ else:
+ logger.error("ERROR....! No Best Path Found in BGP RIB.... FAILED")
+ if is_ipv4_best_path_found:
+ return output_dict
+ else:
+ logger.error("ERROR...! Unable to find the Best Path in the RIB")
+ return False
+
+
#####################################################
#
# Testcases
@@ -1409,6 +1485,326 @@ def test_verify_bgp_default_originate_with_aggregate_summary_p1(request):
write_test_footer(tc_name)
+def test_verify_default_originate_with_2way_ecmp_p2(request):
+ """
+    Summary: "Verify default-originate route with 2 way ECMP and traffic "
+ """
+
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global DEFAULT_ROUTES
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ step("Populating next-hops details")
+ r1_r2_ipv4_neighbor_ips = []
+ r1_r2_ipv6_neighbor_ips = []
+ r1_link = None
+ for index in range(1, 3):
+ r1_link = "r1-link" + str(index)
+ r1_r2_ipv4_neighbor_ips.append(
+ topo["routers"]["r2"]["links"][r1_link]["ipv4"].split("/")[0]
+ )
+ r1_r2_ipv6_neighbor_ips.append(
+ topo["routers"]["r2"]["links"][r1_link]["ipv6"].split("/")[0]
+ )
+
+ step(
+ "Configure default-originate on R1 for all the neighbor of IPv4 and IPv6 peers "
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+ for index in range(2):
+ raw_config = {
+ "r1": {
+ "raw_config": [
+ "router bgp {}".format(local_as),
+ "address-family ipv4 unicast",
+ "neighbor {} default-originate".format(
+ r1_r2_ipv4_neighbor_ips[index]
+ ),
+ "exit-address-family",
+ "address-family ipv6 unicast",
+ "neighbor {} default-originate ".format(
+ r1_r2_ipv6_neighbor_ips[index]
+ ),
+ "exit-address-family",
+ ]
+ }
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "After configuring default-originate command , verify default routes are advertised on R2 "
+ )
+
+ r2_link = None
+ for index in range(1, 3):
+ r2_link = "r2-link" + str(index)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"][r2_link]["ipv4"].split("/")[0]
+ interface = topo["routers"]["r1"]["links"][r2_link]["interface"]
+ ipv6_link_local_nxt_hop = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local_nxt_hop}
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Ping R1 configure IPv4 and IPv6 loopback address from R2")
+ pingaddr = topo["routers"]["r1"]["links"]["lo"]["ipv4"].split("/")[0]
+ router = tgen.gears["r2"]
+ output = router.run("ping -c 4 -w 4 {}".format(pingaddr))
+ assert " 0% packet loss" in output, "Ping R1->R2 FAILED"
+    logger.info("Ping from R2 to R1 ... success")
+
+    step("Shutting down the active route")
+ network = {"ipv4": "0.0.0.0/0", "ipv6": "::/0"}
+ ipv_dict = get_best_path_route_in_FIB(tgen, topo, dut="r2", network=network)
+ dut_links = topo["routers"]["r1"]["links"]
+ active_interface = None
+ for key, values in dut_links.items():
+ ipv4_address = dut_links[key]["ipv4"].split("/")[0]
+ ipv6_address = dut_links[key]["ipv6"].split("/")[0]
+ if ipv_dict["ipv4"] == ipv4_address and ipv_dict["ipv6"] == ipv6_address:
+ active_interface = dut_links[key]["interface"]
+
+ logger.info(
+ "Shutting down the interface {} on router {} ".format(active_interface, "r1")
+ )
+ shutdown_bringup_interface(tgen, "r1", active_interface, False)
+
+ step("Verify the complete convergence to fail after shutting the interface")
+ result = verify_bgp_convergence(tgen, topo, expected=False)
+ assert (
+ result is not True
+    ), " Testcase {} : After shutting down the interface Convergence is expected to be Failed".format(
+ tc_name
+ )
+
+ step(
+ "Verify routes from active best path is not received from r1 after shuting the interface"
+ )
+ r2_link = None
+ for index in range(1, 3):
+ r2_link = "r2-link" + str(index)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"][r2_link]["ipv4"].split("/")[0]
+ interface = topo["routers"]["r1"]["links"][r2_link]["interface"]
+ ipv6_link_local_nxt_hop = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local_nxt_hop}
+ if index == 1:
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ else:
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Ping R1 configure IPv4 and IPv6 loopback address from R2")
+ pingaddr = topo["routers"]["r1"]["links"]["lo"]["ipv4"].split("/")[0]
+ router = tgen.gears["r2"]
+ output = router.run("ping -c 4 -w 4 {}".format(pingaddr))
+ assert " 0% packet loss" in output, "Ping R1->R2 FAILED"
+    logger.info("Ping from R2 to R1 ... success")
+
+    step("Bringing up (no shutdown) the previously active route's interface")
+
+ shutdown_bringup_interface(tgen, "r1", active_interface, True)
+
+ step("Verify the complete convergence after bringup the interface")
+ result = verify_bgp_convergence(tgen, topo)
+ assert (
+ result is True
+ ), " Testcase {} : After bringing up the interface complete convergence is expected ".format(
+ tc_name
+ )
+
+ step("Verify all the routes are received from r1 after no shuting the interface")
+ r2_link = None
+ for index in range(1, 3):
+ r2_link = "r2-link" + str(index)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"][r2_link]["ipv4"].split("/")[0]
+ interface = topo["routers"]["r1"]["links"][r2_link]["interface"]
+ ipv6_link_local_nxt_hop = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local_nxt_hop}
+ if index == 1:
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ else:
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure IPv4 and IPv6 route-map with deny option on R2 to filter default route 0.0.0.0/0 and 0::0/0"
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ input_dict_3 = {
+ "r2": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": DEFAULT_ROUTES["ipv4"],
+ "action": "permit",
+ }
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": DEFAULT_ROUTES["ipv6"],
+ "action": "permit",
+ }
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict_3 = {
+ "r2": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "deny",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "deny",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Apply route-map IN direction of R2 ( R2-R1) for IPv4 and IPv6 BGP neighbors")
+ r2_link = None
+ for index in range(1, 3):
+ r2_link = "r2-link" + str(index)
+ input_dict_4 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ r2_link: {
+ "route_maps": [
+ {"name": "RMv4", "direction": "in"}
+ ]
+ },
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ r2_link: {
+ "route_maps": [
+ {"name": "RMv6", "direction": "in"}
+ ]
+ },
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("After applying the route-map the routes are not expected in RIB ")
+ r2_link = None
+ for index in range(1, 3):
+ r2_link = "r2-link" + str(index)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"][r2_link]["ipv4"].split("/")[0]
+ interface = topo["routers"]["r1"]["links"][r2_link]["interface"]
+ ipv6_link_local_nxt_hop = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local_nxt_hop}
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_distance_change/bgp_admin_dist.json b/tests/topotests/bgp_distance_change/bgp_admin_dist.json
new file mode 100755
index 0000000000..e6a20a6585
--- /dev/null
+++ b/tests/topotests/bgp_distance_change/bgp_admin_dist.json
@@ -0,0 +1,402 @@
+{
+ "address_types": [
+ "ipv4",
+ "ipv6"
+ ],
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 30,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "192.168.22.1/32",
+ "no_of_ip": 2,
+ "next_hop": "10.0.0.2"
+ },
+ {
+ "network": "fc07:1::1/128",
+ "no_of_ip": 2,
+ "next_hop": "fd00::2"
+ },
+ {
+ "network": "192.168.21.1/32",
+ "no_of_ip": 2,
+ "next_hop": "blackhole"
+ },
+ {
+ "network": "fc07:150::1/128",
+ "no_of_ip": 2,
+ "next_hop": "blackhole"
+ }
+ ]
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r5": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "300",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ }
+ }
+ },
+ "static_routes": [
+ {
+ "network": "192.168.20.1/32",
+ "no_of_ip": 2,
+ "next_hop": "blackhole"
+ },
+ {
+ "network": "fc07:50::1/128",
+ "no_of_ip": 2,
+ "next_hop": "blackhole"
+ },
+ {
+ "network": "192.168.21.1/32",
+ "no_of_ip": 2,
+ "next_hop": "blackhole"
+ },
+ {
+ "network": "fc07:150::1/128",
+ "no_of_ip": 2,
+ "next_hop": "blackhole"
+ }
+ ]
+ }
+ }
+} \ No newline at end of file
diff --git a/tests/topotests/bgp_distance_change/bgp_admin_dist_vrf.json b/tests/topotests/bgp_distance_change/bgp_admin_dist_vrf.json
new file mode 100755
index 0000000000..23afa2c911
--- /dev/null
+++ b/tests/topotests/bgp_distance_change/bgp_admin_dist_vrf.json
@@ -0,0 +1,429 @@
+{
+ "address_types": [
+ "ipv4",
+ "ipv6"
+ ],
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 30,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback",
+ "vrf": "RED"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "RED"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "RED"
+ }
+ },
+ "vrfs": [
+ {
+ "name": "RED",
+ "id": "1"
+ }
+ ],
+ "bgp": [{
+ "local_as": "100",
+ "vrf": "RED",
+
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ }
+ }
+ }],
+ "static_routes": [
+ {
+ "network": "192.168.22.1/32",
+ "no_of_ip": 2,
+ "next_hop": "10.0.0.2",
+ "vrf": "RED"
+ },
+ {
+ "network": "fc07:1::1/128",
+ "no_of_ip": 2,
+ "next_hop": "fd00::2",
+ "vrf": "RED"
+ },
+ {
+ "network": "192.168.21.1/32",
+ "no_of_ip": 2,
+ "next_hop": "blackhole",
+ "vrf": "RED"
+ },
+ {
+ "network": "fc07:150::1/128",
+ "no_of_ip": 2,
+ "next_hop": "blackhole",
+ "vrf": "RED"
+ }
+ ]
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback",
+ "vrf": "RED"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "RED"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "RED"
+ }
+ },
+ "vrfs": [
+ {
+ "name": "RED",
+ "id": "1"
+ }
+ ],
+ "bgp": [{
+ "local_as": "100",
+ "vrf": "RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }]
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback",
+ "vrf": "RED"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "RED"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "RED"
+ },
+ "r4": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "RED"
+ },
+ "r5": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "RED"
+ }
+ },
+ "vrfs": [
+ {
+ "name": "RED",
+ "id": "1"
+ }
+ ],
+ "bgp": [{
+ "local_as": "100",
+ "vrf": "RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }]
+ },
+ "r4": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback",
+ "vrf": "RED"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "RED"
+ }
+ },
+ "vrfs": [
+ {
+ "name": "RED",
+ "id": "1"
+ }
+ ],
+ "bgp": [{
+ "local_as": "200",
+ "vrf": "RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }]
+ },
+ "r5": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback",
+ "vrf": "RED"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "vrf": "RED"
+ }
+ },
+ "vrfs": [
+ {
+ "name": "RED",
+ "id": "1"
+ }
+ ],
+ "bgp": [{
+ "local_as": "300",
+ "vrf": "RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5": {}
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ }
+ }
+ }],
+ "static_routes": [
+ {
+ "network": "192.168.20.1/32",
+ "no_of_ip": 2,
+ "next_hop": "blackhole",
+ "vrf": "RED"
+ },
+ {
+ "network": "fc07:50::1/128",
+ "no_of_ip": 2,
+ "next_hop": "blackhole",
+ "vrf": "RED"
+ },
+ {
+ "network": "192.168.21.1/32",
+ "no_of_ip": 2,
+ "next_hop": "blackhole",
+ "vrf": "RED"
+ },
+ {
+ "network": "fc07:150::1/128",
+ "no_of_ip": 2,
+ "next_hop": "blackhole",
+ "vrf": "RED"
+ }
+ ]
+ }
+ }
+} \ No newline at end of file
diff --git a/tests/topotests/bgp_distance_change/test_bgp_admin_dist.py b/tests/topotests/bgp_distance_change/test_bgp_admin_dist.py
new file mode 100755
index 0000000000..90c3d22240
--- /dev/null
+++ b/tests/topotests/bgp_distance_change/test_bgp_admin_dist.py
@@ -0,0 +1,1282 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+import sys
+import time
+import pytest
+import inspect
+import os
+
+
+"""Following tests are covered to test bgp admin distance functionality.
+TC_1:
+ Verify bgp admin distance functionality when static route is
+ configured same as ebgp learnt route
+
+TC_2:
+ Verify ebgp admin distance functionality with ECMP.
+
+TC_3:
+ Verify ibgp admin distance functionality when static route is
+ configured same as bgp learnt route.
+TC_4:
+ Verify ibgp admin distance functionality with ECMP.
+
+TC_7: Chaos - Verify bgp admin distance functionality with chaos.
+"""
+
+#################################
+# TOPOLOGY
+#################################
+"""
+
+ +-------+
+ +--------- | R2 |
+ | +-------+
+ |iBGP |
+ +-------+ |
+ | R1 | |iBGP
+ +-------+ |
+ | |
+ | iBGP +-------+ eBGP +-------+
+ +---------- | R3 |----------| R4 |
+ +-------+ +-------+
+ |
+ |eBGP
+ |
+ +-------+
+ | R5 |
+ +-------+
+
+
+"""
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+# Required to instantiate the topology builder class.
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ step,
+ write_test_footer,
+ create_static_routes,
+ verify_rib,
+ create_route_maps,
+ create_prefix_lists,
+ check_address_types,
+ reset_config_on_routers,
+ check_router_status,
+ stop_router,
+ kill_router_daemons,
+ start_router_daemons,
+ start_router,
+ get_frr_ipv6_linklocal,
+ verify_fib_routes,
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence,
+ create_router_bgp,
+ verify_best_path_as_per_admin_distance,
+ clear_bgp,
+)
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json
+from lib.topolog import logger
+
+# Global variables
+topo = None
+bgp_convergence = False
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+NETWORK = {
+ "ipv4": [
+ "192.168.20.1/32",
+ "192.168.20.2/32",
+ "192.168.21.1/32",
+ "192.168.21.2/32",
+ "192.168.22.1/32",
+ "192.168.22.2/32",
+ ],
+ "ipv6": [
+ "fc07:50::1/128",
+ "fc07:50::2/128",
+ "fc07:150::1/128",
+ "fc07:150::2/128",
+ "fc07:1::1/128",
+ "fc07:1::2/128",
+ ],
+}
+
+ADDR_TYPES = check_address_types()
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ global topo
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/bgp_admin_dist.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+
+    # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Checking BGP convergence
+ global bgp_convergence
+ global ADDR_TYPES
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Api call verify whether BGP is converged
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+    assert bgp_convergence is True, "setup_module : Failed \n Error: {}".format(
+        bgp_convergence
+    )
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """teardown_module.
+
+ Teardown the pytest environment.
+ * `mod`: module name
+ """
+ logger.info("Running teardown_module to delete topology")
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+# Tests starting
+#####################################################
+def test_bgp_admin_distance_ebgp_ecmp_p0():
+ """
+ TC: 2
+ Verify ebgp admin distance functionality with ECMP.
+ """
+ tgen = get_topogen()
+ global bgp_convergence
+
+ if bgp_convergence is not True:
+ pytest.skip("skipping test case because of BGP Convergence failure at setup")
+
+ # test case name
+ tc_name = inspect.stack()[0][3]
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ step("Configure base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step("Configure static route in R4 and R5, redistribute in bgp")
+
+ for addr_type in ADDR_TYPES:
+
+ input_dict = {
+ "r4": {
+ "static_routes": [{"network": NETWORK[addr_type], "next_hop": "Null0"}]
+ }
+ }
+
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+
+ input_dict = {
+ "r5": {
+ "static_routes": [{"network": NETWORK[addr_type], "next_hop": "Null0"}]
+ }
+ }
+
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that route is learnt in DUT via ebgp")
+
+ # Verifying RIB routes
+ protocol = "bgp"
+ input_dict = topo["routers"]
+ dut = "r3"
+ nhop = {"ipv4": [], "ipv6": []}
+ nhop["ipv4"].append(topo["routers"]["r4"]["links"]["r3"]["ipv4"].split("/")[0])
+ nhop["ipv4"].append(topo["routers"]["r5"]["links"]["r3"]["ipv4"].split("/")[0])
+ nhop["ipv6"].append(get_frr_ipv6_linklocal(tgen, "r4", "r3-r4-eth1"))
+    nhop["ipv6"].append(get_frr_ipv6_linklocal(tgen, "r5", "r3-r5-eth1"))
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {"network": NETWORK[addr_type][0], "next_hop": "Null0"}
+ ]
+ }
+ }
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ step("Configure the static route in R3 (Dut).")
+
+ for addr_type in ADDR_TYPES:
+
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {"network": NETWORK[addr_type][0], "next_hop": "Null0"}
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that static route is selected as best route in zebra.")
+
+ # Verifying RIB routes
+ protocol = "static"
+ dut = "r3"
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {"network": NETWORK[addr_type][0], "next_hop": "Null0"}
+ ]
+ }
+ }
+
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ step(" Configure the admin distance of 254 to static route in R3.")
+
+ for addr_type in ADDR_TYPES:
+
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type][0],
+ "next_hop": "Null0",
+ "admin_distance": 254,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that bgp routes are selected as best routes in zebra.")
+ protocol = "bgp"
+ dut = "r3"
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {"network": NETWORK[addr_type][0], "next_hop": "Null0"}
+ ]
+ }
+ }
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ input_dict_1 = {
+ "r3": {
+ "bgp": {
+ "local_as": 100,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 254, "ibgp": 254, "local": 254}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 254, "ibgp": 254, "local": 254}
+ }
+ },
+ },
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that bgp routes are selected as best routes in zebra.")
+ # Verifying RIB routes
+ protocol = "bgp"
+ dut = "r3"
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {"network": NETWORK[addr_type][0], "next_hop": "Null0"}
+ ]
+ }
+ }
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ step("Configure bgp admin distance 10 with CLI in dut.")
+ input_dict_1 = {
+ "r3": {
+ "bgp": {
+ "local_as": 100,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"distance": {"ebgp": 10, "ibgp": 254, "local": 254}}
+ },
+ "ipv6": {
+ "unicast": {"distance": {"ebgp": 10, "ibgp": 254, "local": 254}}
+ },
+ },
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify ebgp routes have admin distance of 10 in dut.")
+
+ protocol = "bgp"
+ input_dict = topo["routers"]
+ dut = "r3"
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {"network": NETWORK[addr_type][0], "next_hop": "Null0"}
+ ]
+ }
+ }
+ result4 = verify_rib(
+ tgen, addr_type, dut, input_dict, protocol=protocol, admin_distance=10
+ )
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ step(
+ "Configure route map with weight as 200 and apply to one of the "
+ "neighbor (R4 neighbor)."
+ )
+
+ # Create Prefix list
+ input_dict_2 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_ls_1": [
+ {
+ "seqid": 10,
+ "network": NETWORK["ipv4"][0],
+ "le": "32",
+ "action": "permit",
+ }
+ ]
+ },
+ "ipv6": {
+ "pf_ls_1_ipv6": [
+ {
+ "seqid": 100,
+ "network": NETWORK["ipv6"][0],
+ "le": "128",
+ "action": "permit",
+ }
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # Create route map
+ input_dict_3 = {
+ "r3": {
+ "route_maps": {
+ "RMAP_WEIGHT": [
+ {
+ "action": "permit",
+ "match": {"ipv4": {"prefix_lists": "pf_ls_1"}},
+ "set": {"weight": 200},
+ },
+ {
+ "action": "permit",
+ "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}},
+ "set": {"weight": 200},
+ },
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # Configure neighbor for route map
+ input_dict_4 = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "RMAP_WEIGHT",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "RMAP_WEIGHT",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that bgp route is selected as best on by zebra in r3.")
+
+ protocol = "bgp"
+ dut = "r3"
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {"network": NETWORK[addr_type][0], "next_hop": "Null0"}
+ ]
+ }
+ }
+ result4 = verify_rib(
+ tgen, addr_type, dut, input_dict, protocol=protocol, admin_distance=10
+ )
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ step("Static route should not be selected as best route.")
+ protocol = "static"
+ dut = "r3"
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {"network": NETWORK[addr_type][0], "next_hop": "Null0"}
+ ]
+ }
+ }
+ result4 = verify_fib_routes(
+ tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result4 is not True
+ ), "Testcase {} : Failed. Wrong route is selected as best route.\n Error: {}".format(
+ tc_name, result4
+ )
+
+ step("Reconfigure the static route without admin distance")
+
+ for addr_type in ADDR_TYPES:
+
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type][0],
+ "next_hop": "Null0",
+ "admin_distance": 254,
+ "delete": True,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {"network": NETWORK[addr_type][0], "next_hop": "Null0"}
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that static route is installed as best route.")
+ protocol = "static"
+ dut = "r3"
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {"network": NETWORK[addr_type][0], "next_hop": "Null0"}
+ ]
+ }
+ }
+ result4 = verify_rib(
+ tgen, addr_type, dut, input_dict, protocol=protocol, fib=True
+ )
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ step("Unconfigure the static route in R3.")
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type][0],
+ "next_hop": "Null0",
+ "delete": True,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that bgp route is selected as best on by zebra in r3.")
+
+ protocol = "bgp"
+ dut = "r3"
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {"network": NETWORK[addr_type][0], "next_hop": "Null0"}
+ ]
+ }
+ }
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ step("Un configure the route map on R3.")
+
+ # Configure neighbor for route map
+ input_dict_4 = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "RMAP_WEIGHT",
+ "direction": "in",
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "RMAP_WEIGHT",
+ "direction": "in",
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify bgp routes installed in zebra.")
+
+ # Verifying RIB routes
+ protocol = "bgp"
+ input_dict = topo["routers"]
+ dut = "r3"
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {"network": NETWORK[addr_type][0], "next_hop": "Null0"}
+ ]
+ }
+ }
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_admin_distance_ibgp_p0():
+ """
+ TC: 3
+ Verify bgp admin distance functionality when static route is
+ configured same as ibgp learnt route
+ """
+ tgen = get_topogen()
+ global bgp_convergence
+
+ if bgp_convergence is not True:
+ pytest.skip("skipping test case because of BGP Convergence failure at setup")
+
+ # test case name
+ tc_name = inspect.stack()[0][3]
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ step("Configure base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step("Configure bgp admin distance 200 with CLI in dut.")
+
+ input_dict_1 = {
+ "r3": {
+ "bgp": {
+ "local_as": 100,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200}
+ }
+ },
+ },
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify bgp routes have admin distance of 200 in dut.")
+ # Verifying best path
+ dut = "r3"
+ attribute = "admin_distance"
+
+ input_dict = {
+ "ipv4": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": "192.168.22.1/32",
+ "admin_distance": 200,
+ },
+ {
+ "network": "192.168.22.2/32",
+ "admin_distance": 200,
+ },
+ ]
+ }
+ },
+ "ipv6": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": "fc07:1::1/128",
+ "admin_distance": 200,
+ },
+ {
+ "network": "fc07:1::2/128",
+ "admin_distance": 200,
+ },
+ ]
+ }
+ },
+ }
+
+ for addr_type in ADDR_TYPES:
+ result = verify_best_path_as_per_admin_distance(
+ tgen, addr_type, dut, input_dict[addr_type], attribute
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Modify the admin distance value to 150.")
+
+ input_dict_1 = {
+ "r3": {
+ "bgp": {
+ "local_as": 100,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 150, "ibgp": 150, "local": 150}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 150, "ibgp": 150, "local": 150}
+ }
+ },
+ },
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify bgp routes have admin distance of 150 in dut.")
+ # Verifying best path
+ dut = "r3"
+ attribute = "admin_distance"
+
+ input_dict = {
+ "ipv4": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": "192.168.22.1/32",
+ "admin_distance": 150,
+ },
+ {
+ "network": "192.168.22.2/32",
+ "admin_distance": 150,
+ },
+ ]
+ }
+ },
+ "ipv6": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": "fc07:1::1/128",
+ "admin_distance": 150,
+ },
+ {
+ "network": "fc07:1::2/128",
+ "admin_distance": 150,
+ },
+ ]
+ }
+ },
+ }
+
+ for addr_type in ADDR_TYPES:
+ result = verify_best_path_as_per_admin_distance(
+ tgen, addr_type, dut, input_dict[addr_type], attribute
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Un configure the admin distance value on DUT")
+
+ input_dict_1 = {
+ "r3": {
+ "bgp": {
+ "local_as": 100,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {
+ "ebgp": 150,
+ "ibgp": 150,
+ "local": 150,
+ "delete": True,
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {
+ "ebgp": 150,
+ "ibgp": 150,
+ "local": 150,
+ "delete": True,
+ }
+ }
+ },
+ },
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify bgp routes have default admin distance in dut.")
+ # Verifying best path
+ dut = "r3"
+ attribute = "admin_distance"
+
+ input_dict = {
+ "ipv4": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": "192.168.22.1/32",
+ "admin_distance": 20,
+ },
+ {
+ "network": "192.168.22.2/32",
+ "admin_distance": 20,
+ },
+ ]
+ }
+ },
+ "ipv6": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": "fc07:1::1/128",
+ "admin_distance": 20,
+ },
+ {
+ "network": "fc07:1::2/128",
+ "admin_distance": 20,
+ },
+ ]
+ }
+ },
+ }
+
+ for addr_type in ADDR_TYPES:
+ result = verify_best_path_as_per_admin_distance(
+ tgen, addr_type, dut, input_dict[addr_type], attribute
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Learn the same route via ebgp and ibgp peer. Configure admin "
+ "distance of 200 in DUT for both ebgp and ibgp peer. "
+ )
+
+ step("Verify that ebgp route is preferred over ibgp.")
+
+ # Verifying RIB routes
+ protocol = "bgp"
+ input_dict = topo["routers"]
+
+ for addr_type in ADDR_TYPES:
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ step("Configure static route Without any admin distance")
+
+ for addr_type in ADDR_TYPES:
+
+ input_dict = {
+ "r3": {
+ "static_routes": [{"network": NETWORK[addr_type], "next_hop": "Null0"}]
+ }
+ }
+
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that zebra selects static route.")
+ protocol = "static"
+
+ for addr_type in ADDR_TYPES:
+
+ input_dict = {
+ "r3": {
+ "static_routes": [{"network": NETWORK[addr_type], "next_hop": "Null0"}]
+ }
+ }
+
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ step("Configure static route with admin distance of 253")
+ for addr_type in ADDR_TYPES:
+
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": "Null0",
+ "admin_distance": 253,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that zebra selects bgp route.")
+ protocol = "bgp"
+
+ for addr_type in ADDR_TYPES:
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ step("Configure admin distance of 254 in bgp for route.")
+
+ input_dict_1 = {
+ "r3": {
+ "bgp": {
+ "local_as": 100,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 254, "ibgp": 254, "local": 254}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 254, "ibgp": 254, "local": 254}
+ }
+ },
+ },
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that zebra selects static route.")
+ protocol = "static"
+
+ for addr_type in ADDR_TYPES:
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ step("Delete the static route.")
+ for addr_type in ADDR_TYPES:
+
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": "Null0",
+ "admin_distance": 253,
+ "delete": True,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that zebra selects bgp route.")
+ protocol = "bgp"
+
+ for addr_type in ADDR_TYPES:
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_admin_distance_chaos_p2():
+ """
+ TC: 7
+ Chaos - Verify bgp admin distance functionality with chaos.
+ """
+ tgen = get_topogen()
+ global bgp_convergence
+
+ if bgp_convergence is not True:
+ pytest.skip("skipping test case because of BGP Convergence failure at setup")
+
+ # test case name
+ tc_name = inspect.stack()[0][3]
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ step("Configure base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step("Configure bgp admin distance 200 with CLI in dut.")
+
+ input_dict_1 = {
+ "r3": {
+ "bgp": {
+ "local_as": 100,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200}
+ }
+ },
+ },
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify bgp routes have admin distance of 200 in dut.")
+ # Verifying best path
+ dut = "r3"
+ attribute = "admin_distance"
+
+ input_dict = {
+ "ipv4": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "admin_distance": 200,
+ },
+ {
+ "network": NETWORK["ipv4"][1],
+ "admin_distance": 200,
+ },
+ ]
+ }
+ },
+ "ipv6": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "admin_distance": 200,
+ },
+ {
+ "network": NETWORK["ipv6"][1],
+ "admin_distance": 200,
+ },
+ ]
+ }
+ },
+ }
+
+ for addr_type in ADDR_TYPES:
+ result = verify_best_path_as_per_admin_distance(
+ tgen, addr_type, dut, input_dict[addr_type], attribute
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Restart frr on R3")
+ stop_router(tgen, "r3")
+ start_router(tgen, "r3")
+
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step("Verify ebgp and ibgp routes have admin distance of 200 in dut.")
+ for addr_type in ADDR_TYPES:
+ result = verify_best_path_as_per_admin_distance(
+ tgen, addr_type, dut, input_dict[addr_type], attribute
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Restart bgpd process on R3")
+ kill_router_daemons(tgen, "r3", ["bgpd"])
+ start_router_daemons(tgen, "r3", ["bgpd"])
+
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step("Verify ebgp and ibgp routes have admin distance of 200 in dut.")
+ for addr_type in ADDR_TYPES:
+ result = verify_best_path_as_per_admin_distance(
+ tgen, addr_type, dut, input_dict[addr_type], attribute
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Clear BGP")
+ for rtr in topo["routers"]:
+ clear_bgp(tgen, "ipv4", rtr)
+ clear_bgp(tgen, "ipv6", rtr)
+
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step("Verify that zebra selects bgp route.")
+ protocol = "bgp"
+
+ for addr_type in ADDR_TYPES:
+        result4 = verify_rib(tgen, addr_type, dut, input_dict[addr_type], protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_distance_change/test_bgp_admin_dist_vrf.py b/tests/topotests/bgp_distance_change/test_bgp_admin_dist_vrf.py
new file mode 100755
index 0000000000..559dc93aa0
--- /dev/null
+++ b/tests/topotests/bgp_distance_change/test_bgp_admin_dist_vrf.py
@@ -0,0 +1,900 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+import sys
+import time
+import pytest
+import inspect
+import os
+
+"""Following tests are covered to test bgp admin distance functionality.
+TC_5:
+ Verify bgp admin distance functionality when static route is configured
+ same as bgp learnt route in user vrf.
+
+TC_6: Verify bgp admin distance functionality with ECMP in user vrf.
+
+TC_7:
+ Verify bgp admin distance functionality when routes are
+ imported between VRFs.
+"""
+
+#################################
+# TOPOLOGY
+#################################
+"""
+
+ +-------+
+ +--------- | R2 |
+ | +-------+
+ |iBGP |
+ +-------+ |
+ | R1 | |iBGP
+ +-------+ |
+ | |
+ | iBGP +-------+ eBGP +-------+
+ +---------- | R3 |----------| R4 |
+ +-------+ +-------+
+ |
+ |eBGP
+ |
+ +-------+
+ | R5 |
+ +-------+
+
+
+"""
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+# Required to instantiate the topology builder class.
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ step,
+ write_test_footer,
+ create_static_routes,
+ verify_rib,
+ check_address_types,
+ reset_config_on_routers,
+ check_router_status,
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence,
+ create_router_bgp,
+ verify_best_path_as_per_admin_distance,
+)
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json
+from lib.topolog import logger
+
+# Global variables
+topo = None
+bgp_convergence = False
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+NETWORK = {
+ "ipv4": [
+ "192.168.20.1/32",
+ "192.168.20.2/32",
+ "192.168.21.1/32",
+ "192.168.21.2/32",
+ "192.168.22.1/32",
+ "192.168.22.2/32",
+ ],
+ "ipv6": [
+ "fc07:50::1/128",
+ "fc07:50::2/128",
+ "fc07:150::1/128",
+ "fc07:150::2/128",
+ "fc07:1::1/128",
+ "fc07:1::2/128",
+ ],
+}
+ADDR_TYPES = check_address_types()
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ global topo
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/bgp_admin_dist_vrf.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start deamons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Checking BGP convergence
+ global bgp_convergence
+ global ADDR_TYPES
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Api call verify whether BGP is converged
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format(
+ bgp_convergence
+ )
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """teardown_module.
+
+ Teardown the pytest environment.
+ * `mod`: module name
+ """
+ logger.info("Running teardown_module to delete topology")
+ tgen = get_topogen()
+
+    # Stop topology and Remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+# Tests starting
+#####################################################
+
+
+def test_bgp_admin_distance_ebgp_vrf_p0():
+ """
+ TC: 5
+ Verify bgp admin distance functionality when static route is
+ configured same as ebgp learnt route
+ """
+ tgen = get_topogen()
+ global bgp_convergence
+
+ if bgp_convergence is not True:
+ pytest.skip("skipping test case because of BGP Convergence failure at setup")
+
+ # test case name
+ tc_name = inspect.stack()[0][3]
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ step("Configure base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step("Configure bgp admin distance 200 with CLI in dut.")
+
+ input_dict_1 = {
+ "r3": {
+ "bgp": [
+ {
+ "vrf": "RED",
+ "local_as": 100,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200}
+ }
+ },
+ },
+ }
+ ]
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify bgp routes have admin distance of 200 in dut.")
+ # Verifying best path
+ dut = "r3"
+ attribute = "admin_distance"
+
+ input_dict = {
+ "ipv4": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "admin_distance": 200,
+ "vrf": "RED",
+ },
+ {
+ "network": NETWORK["ipv4"][1],
+ "admin_distance": 200,
+ "vrf": "RED",
+ },
+ ]
+ }
+ },
+ "ipv6": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "admin_distance": 200,
+ "vrf": "RED",
+ },
+ {
+ "network": NETWORK["ipv6"][1],
+ "admin_distance": 200,
+ "vrf": "RED",
+ },
+ ]
+ }
+ },
+ }
+
+ for addr_type in ADDR_TYPES:
+ result = verify_best_path_as_per_admin_distance(
+ tgen, addr_type, dut, input_dict[addr_type], attribute, vrf="RED"
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Modify the admin distance value to 150.")
+
+ input_dict_1 = {
+ "r3": {
+ "bgp": [
+ {
+ "local_as": 100,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 150, "ibgp": 150, "local": 150}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 150, "ibgp": 150, "local": 150}
+ }
+ },
+ },
+ }
+ ]
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify bgp routes have admin distance of 150 in dut.")
+ # Verifying best path
+ dut = "r3"
+ attribute = "admin_distance"
+
+ input_dict = {
+ "ipv4": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "admin_distance": 150,
+ "vrf": "RED",
+ },
+ {
+ "network": NETWORK["ipv4"][1],
+ "admin_distance": 150,
+ "vrf": "RED",
+ },
+ ]
+ }
+ },
+ "ipv6": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "admin_distance": 150,
+ "vrf": "RED",
+ },
+ {
+ "network": NETWORK["ipv6"][1],
+ "admin_distance": 150,
+ "vrf": "RED",
+ },
+ ]
+ }
+ },
+ }
+
+ for addr_type in ADDR_TYPES:
+ result = verify_best_path_as_per_admin_distance(
+ tgen, addr_type, dut, input_dict[addr_type], attribute, vrf="RED"
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Un configure the admin distance value on DUT")
+
+ input_dict_1 = {
+ "r3": {
+ "bgp": [
+ {
+ "local_as": 100,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {
+ "ebgp": 150,
+ "ibgp": 150,
+ "local": 150,
+ "delete": True,
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {
+ "ebgp": 150,
+ "ibgp": 150,
+ "local": 150,
+ "delete": True,
+ }
+ }
+ },
+ },
+ }
+ ]
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify bgp routes have default admin distance in dut.")
+ # Verifying best path
+ dut = "r3"
+ attribute = "admin_distance"
+
+ input_dict = {
+ "ipv4": {
+ "r3": {
+ "static_routes": [
+ {"network": NETWORK["ipv4"][0], "admin_distance": 20, "vrf": "RED"},
+ {"network": NETWORK["ipv4"][1], "admin_distance": 20, "vrf": "RED"},
+ ]
+ }
+ },
+ "ipv6": {
+ "r3": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"][0], "admin_distance": 20, "vrf": "RED"},
+ {"network": NETWORK["ipv6"][1], "admin_distance": 20, "vrf": "RED"},
+ ]
+ }
+ },
+ }
+
+ for addr_type in ADDR_TYPES:
+ result = verify_best_path_as_per_admin_distance(
+ tgen, addr_type, dut, input_dict[addr_type], attribute, vrf="RED"
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure static route Without any admin distance")
+
+ for addr_type in ADDR_TYPES:
+ # Create Static routes
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {"network": NETWORK[addr_type], "next_hop": "Null0", "vrf": "RED"}
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that zebra selects static route.")
+ protocol = "static"
+ # dual stack changes
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {"network": NETWORK[addr_type], "next_hop": "Null0", "vrf": "RED"}
+ ]
+ }
+ }
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ step("Configure static route with admin distance of 253")
+ for addr_type in ADDR_TYPES:
+ # Create Static routes
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": "Null0",
+ "admin_distance": 253,
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that zebra selects bgp route.")
+ protocol = "bgp"
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": "Null0",
+ "admin_distance": 253,
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ step("Configure admin distance of 254 in bgp for route .")
+
+ input_dict_1 = {
+ "r3": {
+ "bgp": [
+ {
+ "local_as": 100,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 254, "ibgp": 254, "local": 254}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 254, "ibgp": 254, "local": 254}
+ }
+ },
+ },
+ }
+ ]
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that zebra selects static route.")
+ protocol = "static"
+ # dual stack changes
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": "Null0",
+ "admin_distance": 253,
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ step("Configure admin distance of 255 in bgp for route in vrf red")
+
+ input_dict_1 = {
+ "r3": {
+ "bgp": [
+ {
+ "local_as": 100,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 255, "ibgp": 255, "local": 255}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 255, "ibgp": 255, "local": 255}
+ }
+ },
+ },
+ }
+ ]
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that zebra selects static route.")
+ protocol = "static"
+ # dual stack changes
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": "Null0",
+ "admin_distance": 253,
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ step("Delete the static route.")
+ for addr_type in ADDR_TYPES:
+ # Create Static routes
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": "Null0",
+ "admin_distance": 253,
+ "delete": True,
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that zebra selects bgp route.")
+ protocol = "bgp"
+ # dual stack changes
+ for addr_type in ADDR_TYPES:
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_admin_distance_ebgp_with_imported_rtes_vrf_p0():
+ """
+    TC: 7
+    Verify bgp admin distance functionality when routes are
+    imported between VRFs.
+ """
+ tgen = get_topogen()
+ global bgp_convergence
+
+ if bgp_convergence is not True:
+ pytest.skip("skipping test case because of BGP Convergence failure at setup")
+
+ # test case name
+ tc_name = inspect.stack()[0][3]
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ step("Configure base config as per the topology")
+ reset_config_on_routers(tgen)
+ step("Configure bgp admin distance 200 with CLI in dut.")
+ step(" Import route from vrf to default vrf")
+ input_dict_1 = {
+ "r3": {
+ "bgp": [
+ {
+ "vrf": "RED",
+ "local_as": 100,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200}
+ }
+ },
+ },
+ },
+ {
+ "local_as": 100,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200},
+ "import": {"vrf": "RED"},
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200},
+ "import": {
+ "vrf": "RED",
+ },
+ }
+ },
+ },
+ },
+ ]
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify bgp routes have admin distance of 200 in dut.")
+ # Verifying best path
+ dut = "r3"
+ attribute = "admin_distance"
+
+ input_dict = {
+ "ipv4": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "admin_distance": 200,
+ "vrf": "RED",
+ },
+ {
+ "network": NETWORK["ipv4"][1],
+ "admin_distance": 200,
+ "vrf": "RED",
+ },
+ ]
+ }
+ },
+ "ipv6": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "admin_distance": 200,
+ "vrf": "RED",
+ },
+ {
+ "network": NETWORK["ipv6"][1],
+ "admin_distance": 200,
+ "vrf": "RED",
+ },
+ ]
+ }
+ },
+ }
+
+ for addr_type in ADDR_TYPES:
+ result = verify_best_path_as_per_admin_distance(
+ tgen, addr_type, dut, input_dict[addr_type], attribute, vrf="RED"
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify that routes are getting imported without any issues and "
+ "routes are calculated and installed in rib."
+ )
+
+ input_dict = {
+ "ipv4": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "admin_distance": 200,
+ },
+ {
+ "network": NETWORK["ipv4"][1],
+ "admin_distance": 200,
+ },
+ ]
+ }
+ },
+ "ipv6": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "admin_distance": 200,
+ },
+ {
+ "network": NETWORK["ipv6"][1],
+ "admin_distance": 200,
+ },
+ ]
+ }
+ },
+ }
+
+ step("Verify that zebra selects bgp route.")
+ protocol = "bgp"
+ # dual stack changes
+ for addr_type in ADDR_TYPES:
+        result4 = verify_rib(tgen, addr_type, dut, input_dict[addr_type], protocol=protocol)
+ assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result4
+ )
+
+ step(" Un configure import route vrf red inside default vrf.")
+ input_dict_1 = {
+ "r3": {
+ "bgp": [
+ {
+ "vrf": "RED",
+ "local_as": 100,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200}
+ }
+ },
+ },
+ },
+ {
+ "local_as": 100,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200},
+ "import": {"vrf": "RED", "delete": True},
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200},
+ "import": {"vrf": "RED", "delete": True},
+ }
+ },
+ },
+ },
+ ]
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "ipv4": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "admin_distance": 200,
+ },
+ {
+ "network": NETWORK["ipv4"][1],
+ "admin_distance": 200,
+ },
+ ]
+ }
+ },
+ "ipv6": {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "admin_distance": 200,
+ },
+ {
+ "network": NETWORK["ipv6"][1],
+ "admin_distance": 200,
+ },
+ ]
+ }
+ },
+ }
+
+ step("Verify that route withdrawal happens properly.")
+ protocol = "bgp"
+ # dual stack changes
+ for addr_type in ADDR_TYPES:
+ result4 = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict[addr_type],
+ protocol=protocol,
+ expected=False,
+ )
+ assert (
+ result4 is not True
+ ), "Testcase {} : Failed \n Route is not withdrawn. Error: {}".format(
+ tc_name, result4
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_soo/__init__.py b/tests/topotests/bgp_soo/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_soo/__init__.py
diff --git a/tests/topotests/bgp_soo/cpe1/bgpd.conf b/tests/topotests/bgp_soo/cpe1/bgpd.conf
new file mode 100644
index 0000000000..a8984d4e8b
--- /dev/null
+++ b/tests/topotests/bgp_soo/cpe1/bgpd.conf
@@ -0,0 +1,10 @@
+router bgp 65000
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.2 remote-as external
+ neighbor 192.168.1.2 timers 1 3
+ neighbor 192.168.1.2 timers connect 1
+ neighbor 10.0.0.2 remote-as internal
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_soo/cpe1/zebra.conf b/tests/topotests/bgp_soo/cpe1/zebra.conf
new file mode 100644
index 0000000000..669cb91295
--- /dev/null
+++ b/tests/topotests/bgp_soo/cpe1/zebra.conf
@@ -0,0 +1,12 @@
+!
+interface lo
+ ip address 172.16.255.1/32
+!
+interface cpe1-eth0
+ ip address 192.168.1.1/24
+!
+interface cpe1-eth1
+ ip address 10.0.0.1/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_soo/cpe2/bgpd.conf b/tests/topotests/bgp_soo/cpe2/bgpd.conf
new file mode 100644
index 0000000000..19f7a24e2b
--- /dev/null
+++ b/tests/topotests/bgp_soo/cpe2/bgpd.conf
@@ -0,0 +1,10 @@
+router bgp 65000
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.2 remote-as external
+ neighbor 192.168.2.2 timers 1 3
+ neighbor 192.168.2.2 timers connect 1
+ neighbor 10.0.0.1 remote-as internal
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_soo/cpe2/zebra.conf b/tests/topotests/bgp_soo/cpe2/zebra.conf
new file mode 100644
index 0000000000..52f36c06e8
--- /dev/null
+++ b/tests/topotests/bgp_soo/cpe2/zebra.conf
@@ -0,0 +1,9 @@
+!
+interface cpe2-eth0
+ ip address 192.168.2.1/24
+!
+interface cpe2-eth1
+ ip address 10.0.0.2/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_soo/pe1/bgpd.conf b/tests/topotests/bgp_soo/pe1/bgpd.conf
new file mode 100644
index 0000000000..04a6857c7c
--- /dev/null
+++ b/tests/topotests/bgp_soo/pe1/bgpd.conf
@@ -0,0 +1,27 @@
+router bgp 65001
+ bgp router-id 10.10.10.10
+ no bgp ebgp-requires-policy
+ no bgp default ipv4-unicast
+ neighbor 10.10.10.20 remote-as internal
+ neighbor 10.10.10.20 update-source 10.10.10.10
+ address-family ipv4 vpn
+ neighbor 10.10.10.20 activate
+ exit-address-family
+!
+router bgp 65001 vrf RED
+ bgp router-id 192.168.1.2
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
+ neighbor 192.168.1.1 timers 1 3
+ neighbor 192.168.1.1 timers connect 1
+ address-family ipv4 unicast
+ neighbor 192.168.1.1 as-override
+ neighbor 192.168.1.1 soo 65000:1
+ label vpn export 1111
+ rd vpn export 192.168.1.2:2
+ rt vpn import 192.168.2.2:2 192.168.1.2:2
+ rt vpn export 192.168.1.2:2
+ export vpn
+ import vpn
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_soo/pe1/ldpd.conf b/tests/topotests/bgp_soo/pe1/ldpd.conf
new file mode 100644
index 0000000000..fb40f06fa7
--- /dev/null
+++ b/tests/topotests/bgp_soo/pe1/ldpd.conf
@@ -0,0 +1,10 @@
+mpls ldp
+ router-id 10.10.10.10
+ !
+ address-family ipv4
+ discovery transport-address 10.10.10.10
+ !
+ interface pe1-eth1
+ !
+ !
+!
diff --git a/tests/topotests/bgp_soo/pe1/ospfd.conf b/tests/topotests/bgp_soo/pe1/ospfd.conf
new file mode 100644
index 0000000000..34f0899c95
--- /dev/null
+++ b/tests/topotests/bgp_soo/pe1/ospfd.conf
@@ -0,0 +1,7 @@
+interface pe1-eth1
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+!
+router ospf
+ router-id 10.10.10.10
+ network 0.0.0.0/0 area 0
diff --git a/tests/topotests/bgp_soo/pe1/zebra.conf b/tests/topotests/bgp_soo/pe1/zebra.conf
new file mode 100644
index 0000000000..cc8ff1983a
--- /dev/null
+++ b/tests/topotests/bgp_soo/pe1/zebra.conf
@@ -0,0 +1,12 @@
+!
+interface lo
+ ip address 10.10.10.10/32
+!
+interface pe1-eth0 vrf RED
+ ip address 192.168.1.2/24
+!
+interface pe1-eth1
+ ip address 10.0.1.1/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_soo/pe2/bgpd.conf b/tests/topotests/bgp_soo/pe2/bgpd.conf
new file mode 100644
index 0000000000..efebc02f27
--- /dev/null
+++ b/tests/topotests/bgp_soo/pe2/bgpd.conf
@@ -0,0 +1,31 @@
+router bgp 65001
+ bgp router-id 10.10.10.20
+ no bgp ebgp-requires-policy
+ no bgp default ipv4-unicast
+ neighbor 10.10.10.10 remote-as internal
+ neighbor 10.10.10.10 update-source 10.10.10.20
+ address-family ipv4 vpn
+ neighbor 10.10.10.10 activate
+ exit-address-family
+!
+router bgp 65001 vrf RED
+ bgp router-id 192.168.2.2
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.1 remote-as external
+ neighbor 192.168.2.1 timers 1 3
+ neighbor 192.168.2.1 timers connect 1
+ address-family ipv4 unicast
+ neighbor 192.168.2.1 as-override
+ neighbor 192.168.2.1 route-map cpe2-in in
+ label vpn export 2222
+ rd vpn export 192.168.2.2:2
+ rt vpn import 192.168.2.2:2 192.168.1.2:2
+ rt vpn export 192.168.2.2:2
+ export vpn
+ import vpn
+ exit-address-family
+!
+! To prefer internal MPLS route over eBGP
+route-map cpe2-in permit 10
+ set local-preference 50
+exit
diff --git a/tests/topotests/bgp_soo/pe2/ldpd.conf b/tests/topotests/bgp_soo/pe2/ldpd.conf
new file mode 100644
index 0000000000..e2b5359993
--- /dev/null
+++ b/tests/topotests/bgp_soo/pe2/ldpd.conf
@@ -0,0 +1,10 @@
+mpls ldp
+ router-id 10.10.10.20
+ !
+ address-family ipv4
+ discovery transport-address 10.10.10.20
+ !
+ interface pe2-eth0
+ !
+ !
+!
diff --git a/tests/topotests/bgp_soo/pe2/ospfd.conf b/tests/topotests/bgp_soo/pe2/ospfd.conf
new file mode 100644
index 0000000000..4c4b1374d1
--- /dev/null
+++ b/tests/topotests/bgp_soo/pe2/ospfd.conf
@@ -0,0 +1,7 @@
+interface pe2-eth0
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+!
+router ospf
+ router-id 10.10.10.20
+ network 0.0.0.0/0 area 0
diff --git a/tests/topotests/bgp_soo/pe2/zebra.conf b/tests/topotests/bgp_soo/pe2/zebra.conf
new file mode 100644
index 0000000000..8049a74601
--- /dev/null
+++ b/tests/topotests/bgp_soo/pe2/zebra.conf
@@ -0,0 +1,12 @@
+!
+interface lo
+ ip address 10.10.10.20/32
+!
+interface pe2-eth1 vrf RED
+ ip address 192.168.2.2/24
+!
+interface pe2-eth0
+ ip address 10.0.1.2/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_soo/test_bgp_soo.py b/tests/topotests/bgp_soo/test_bgp_soo.py
new file mode 100644
index 0000000000..e3a7334c60
--- /dev/null
+++ b/tests/topotests/bgp_soo/test_bgp_soo.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Test if BGP SoO per neighbor works correctly. Routes having SoO
+extended community MUST be rejected if the neighbor is configured
+with soo (neighbor soo).
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import step
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ tgen.add_router("cpe1")
+ tgen.add_router("cpe2")
+ tgen.add_router("pe1")
+ tgen.add_router("pe2")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["cpe1"])
+ switch.add_link(tgen.gears["pe1"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["pe1"])
+ switch.add_link(tgen.gears["pe2"])
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["pe2"])
+ switch.add_link(tgen.gears["cpe2"])
+
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["cpe2"])
+ switch.add_link(tgen.gears["cpe1"])
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ pe1 = tgen.gears["pe1"]
+ pe2 = tgen.gears["pe2"]
+
+ pe1.run("ip link add RED type vrf table 1001")
+ pe1.run("ip link set up dev RED")
+ pe2.run("ip link add RED type vrf table 1001")
+ pe2.run("ip link set up dev RED")
+ pe1.run("ip link set pe1-eth0 master RED")
+ pe2.run("ip link set pe2-eth1 master RED")
+
+ pe1.run("sysctl -w net.ipv4.ip_forward=1")
+ pe2.run("sysctl -w net.ipv4.ip_forward=1")
+ pe1.run("sysctl -w net.mpls.conf.pe1-eth0.input=1")
+ pe2.run("sysctl -w net.mpls.conf.pe2-eth1.input=1")
+
+ router_list = tgen.routers()
+
+ for i, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_LDP, os.path.join(CWD, "{}/ldpd.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_soo():
+ tgen = get_topogen()
+
+ pe2 = tgen.gears["pe2"]
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ def _bgp_soo_unconfigured():
+ output = json.loads(
+ pe2.vtysh_cmd(
+ "show bgp vrf RED ipv4 unicast neighbors 192.168.2.1 advertised-routes json"
+ )
+ )
+ expected = {"advertisedRoutes": {"172.16.255.1/32": {"path": "65001"}}}
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_soo_unconfigured)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "Failed to see BGP convergence in pe2"
+
+ step("Configure SoO (65000:1) for PE2 -- CPE2 session")
+ pe2.vtysh_cmd(
+ """
+ configure terminal
+ router bgp 65001 vrf RED
+ address-family ipv4 unicast
+ neighbor 192.168.2.1 soo 65000:1
+ """
+ )
+
+ def _bgp_soo_configured():
+ output = json.loads(
+ pe2.vtysh_cmd(
+ "show bgp vrf RED ipv4 unicast neighbors 192.168.2.1 advertised-routes json"
+ )
+ )
+ expected = {"advertisedRoutes": {"172.16.255.1/32": None}}
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_soo_configured)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "SoO filtering does not work from pe2"
+
+ step("Configure SoO (65000:2) for PE2 -- CPE2 session")
+ pe2.vtysh_cmd(
+ """
+ configure terminal
+ router bgp 65001 vrf RED
+ address-family ipv4 unicast
+ neighbor 192.168.2.1 soo 65000:2
+ """
+ )
+
+ test_func = functools.partial(_bgp_soo_unconfigured)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "SoO filtering does not work from pe2"
+
+ step("Unconfigure SoO for PE2 -- CPE2 session")
+ pe2.vtysh_cmd(
+ """
+ configure terminal
+ router bgp 65001 vrf RED
+ address-family ipv4 unicast
+ no neighbor 192.168.2.1 soo
+ """
+ )
+
+ test_func = functools.partial(_bgp_soo_unconfigured)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "SoO filtering does not work from pe2"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/conftest.py b/tests/topotests/conftest.py
index 120a3e82e4..f79ca71a64 100755
--- a/tests/topotests/conftest.py
+++ b/tests/topotests/conftest.py
@@ -363,7 +363,7 @@ def pytest_configure(config):
# Check environment now that we have config
if not diagnose_env(rundir):
- pytest.exit("environment has errors, please read the logs")
+ pytest.exit("environment has errors, please read the logs in %s" % rundir)
@pytest.fixture(autouse=True, scope="session")
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
index 341ec25a19..7ab36c4fcd 100644
--- a/tests/topotests/lib/bgp.py
+++ b/tests/topotests/lib/bgp.py
@@ -491,6 +491,25 @@ def __create_bgp_unicast_neighbor(
cmd = "no {}".format(cmd)
config_data.append(cmd)
+ admin_dist_data = addr_data.setdefault("distance", {})
+ if admin_dist_data:
+ if len(admin_dist_data) < 2:
+ logger.debug(
+ "Router %s: pass the admin distance values for "
+ "ebgp, ibgp and local routes",
+ router,
+ )
+ cmd = "distance bgp {} {} {}".format(
+ admin_dist_data["ebgp"],
+ admin_dist_data["ibgp"],
+ admin_dist_data["local"],
+ )
+
+ del_action = admin_dist_data.setdefault("delete", False)
+ if del_action:
+ cmd = "no distance bgp"
+ config_data.append(cmd)
+
import_vrf_data = addr_data.setdefault("import", {})
if import_vrf_data:
cmd = "import vrf {}".format(import_vrf_data["vrf"])
@@ -2662,7 +2681,7 @@ def verify_best_path_as_per_bgp_attribute(
@retry(retry_timeout=10)
def verify_best_path_as_per_admin_distance(
- tgen, addr_type, router, input_dict, attribute, expected=True
+ tgen, addr_type, router, input_dict, attribute, expected=True, vrf=None
):
"""
API is to verify best path according to admin distance for given
@@ -2678,6 +2697,7 @@ def verify_best_path_as_per_admin_distance(
* `input_dict`: defines different routes with different admin distance
to calculate for which route best path is selected
* `expected` : expected results from API, by-default True
+ * `vrf`: VRF name; if passed, the best-path check is done in this particular VRF.
Usage
-----
@@ -2710,9 +2730,14 @@ def verify_best_path_as_per_admin_distance(
# Show ip route cmd
if addr_type == "ipv4":
- command = "show ip route json"
+ command = "show ip route"
+ else:
+ command = "show ipv6 route"
+
+ if vrf:
+ command = "{} vrf {} json".format(command, vrf)
else:
- command = "show ipv6 route json"
+ command = "{} json".format(command)
for routes_from_router in input_dict.keys():
sh_ip_route_json = router_list[routes_from_router].vtysh_cmd(
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index fa33b02ed1..5f4c280715 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -3339,6 +3339,7 @@ def verify_rib(
metric=None,
fib=None,
count_only=False,
+ admin_distance=None,
):
"""
Data will be read from input_dict or input JSON file, API will generate
@@ -3611,6 +3612,30 @@ def verify_rib(
)
return errormsg
+ if admin_distance is not None:
+ if "distance" not in rib_routes_json[st_rt][0]:
+ errormsg = (
+ "[DUT: {}]: admin distance is"
+ " not present for"
+ " route {} in RIB \n".format(dut, st_rt)
+ )
+ return errormsg
+
+ if (
+ admin_distance
+ != rib_routes_json[st_rt][0]["distance"]
+ ):
+ errormsg = (
+ "[DUT: {}]: admin distance value "
+ "{} is not matched for "
+ "route {} in RIB \n".format(
+ dut,
+ admin_distance,
+ st_rt,
+ )
+ )
+ return errormsg
+
if metric is not None:
if "metric" not in rib_routes_json[st_rt][0]:
errormsg = (
@@ -3764,7 +3789,7 @@ def verify_rib(
@retry(retry_timeout=12)
-def verify_fib_routes(tgen, addr_type, dut, input_dict, next_hop=None):
+def verify_fib_routes(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
"""
Data will be read from input_dict or input JSON file, API will generate
same prefixes, which were redistributed by either create_static_routes() or
@@ -3822,6 +3847,9 @@ def verify_fib_routes(tgen, addr_type, dut, input_dict, next_hop=None):
found_routes = []
missing_routes = []
+ if protocol:
+ command = "{} {}".format(command, protocol)
+
if "static_routes" in input_dict[routerInput]:
static_routes = input_dict[routerInput]["static_routes"]
@@ -5039,7 +5067,7 @@ def verify_ip_nht(tgen, input_dict):
for nh in nh_list:
if nh in show_ip_nht:
- nht = run_frr_cmd(rnode, f"show ip nht {nh}")
+ nht = run_frr_cmd(rnode, "show ip nht {}".format(nh))
if "unresolved" in nht:
errormsg = "Nexthop {} became unresolved on {}".format(nh, router)
return errormsg
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
index c51a187f28..04712eda87 100644
--- a/tests/topotests/lib/topogen.py
+++ b/tests/topotests/lib/topogen.py
@@ -1293,7 +1293,7 @@ def diagnose_env_linux(rundir):
)
continue
- logger.warning("could not find {} in {}".format(fname, frrdir))
+ logger.error("could not find {} in {}".format(fname, frrdir))
ret = False
else:
if fname != "zebra":
diff --git a/tools/frrcommon.sh.in b/tools/frrcommon.sh.in
index 759d498379..b589ced965 100755
--- a/tools/frrcommon.sh.in
+++ b/tools/frrcommon.sh.in
@@ -272,7 +272,7 @@ all_start() {
}
all_stop() {
- local pids reversed
+ local pids reversed need_zebra
daemon_list enabled_daemons disabled_daemons
[ "$1" = "--reallyall" ] && enabled_daemons="$enabled_daemons $disabled_daemons"
@@ -282,13 +282,23 @@ all_stop() {
reversed="$dmninst $reversed"
done
+ # Stop zebra last, after trying to stop the other daemons
for dmninst in $reversed; do
+ if [ "$dmninst" = "zebra" ]; then
+ need_zebra="yes"
+ continue
+ fi
+
daemon_stop "$dmninst" "$1" &
pids="$pids $!"
done
for pid in $pids; do
wait $pid
done
+
+ if [ -n "$need_zebra" ]; then
+ daemon_stop "zebra"
+ fi
}
all_status() {
diff --git a/zebra/kernel_socket.c b/zebra/kernel_socket.c
index cb549339af..4c089ee194 100644
--- a/zebra/kernel_socket.c
+++ b/zebra/kernel_socket.c
@@ -1112,14 +1112,6 @@ void rtm_read(struct rt_msghdr *rtm)
} else
return;
- /*
- * CHANGE: delete the old prefix, we have no further information
- * to specify the route really
- */
- if (rtm->rtm_type == RTM_CHANGE)
- rib_delete(afi, SAFI_UNICAST, VRF_DEFAULT, ZEBRA_ROUTE_KERNEL,
- 0, zebra_flags, &p, NULL, NULL, 0, RT_TABLE_MAIN, 0,
- 0, true);
if (rtm->rtm_type == RTM_GET || rtm->rtm_type == RTM_ADD
|| rtm->rtm_type == RTM_CHANGE)
rib_add(afi, SAFI_UNICAST, VRF_DEFAULT, proto, 0, zebra_flags,
diff --git a/zebra/redistribute.c b/zebra/redistribute.c
index 1a28f8ceec..4a8fe938ed 100644
--- a/zebra/redistribute.c
+++ b/zebra/redistribute.c
@@ -685,15 +685,10 @@ int zebra_add_import_table_entry(struct zebra_vrf *zvrf, struct route_node *rn,
zebra_del_import_table_entry(zvrf, rn, same);
}
- newre = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
- newre->type = ZEBRA_ROUTE_TABLE;
- newre->distance = zebra_import_table_distance[afi][re->table];
- newre->flags = re->flags;
- newre->metric = re->metric;
- newre->mtu = re->mtu;
- newre->table = zvrf->table_id;
- newre->uptime = monotime(NULL);
- newre->instance = re->table;
+ newre = zebra_rib_route_entry_new(
+ 0, ZEBRA_ROUTE_TABLE, re->table, re->flags, re->nhe_id,
+ zvrf->table_id, re->metric, re->mtu,
+ zebra_import_table_distance[afi][re->table], re->tag);
ng = nexthop_group_new();
copy_nexthops(&ng->nexthop, re->nhe->nhg.nexthop, NULL);
diff --git a/zebra/rib.h b/zebra/rib.h
index a40843e27f..dec5b2b8d6 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -178,15 +178,17 @@ struct route_entry {
/* meta-queue structure:
* sub-queue 0: nexthop group objects
* sub-queue 1: EVPN/VxLAN objects
- * sub-queue 2: connected
- * sub-queue 3: kernel
- * sub-queue 4: static
- * sub-queue 5: RIP, RIPng, OSPF, OSPF6, IS-IS, EIGRP, NHRP
- * sub-queue 6: iBGP, eBGP
- * sub-queue 7: any other origin (if any) typically those that
+ * sub-queue 2: Early Route Processing
+ * sub-queue 3: Early Label Processing
+ * sub-queue 4: connected
+ * sub-queue 5: kernel
+ * sub-queue 6: static
+ * sub-queue 7: RIP, RIPng, OSPF, OSPF6, IS-IS, EIGRP, NHRP
+ * sub-queue 8: iBGP, eBGP
+ * sub-queue 9: any other origin (if any) typically those that
* don't generate routes
*/
-#define MQ_SIZE 8
+#define MQ_SIZE 10
struct meta_queue {
struct list *subq[MQ_SIZE];
uint32_t size; /* sum of lengths of all subqueues */
@@ -342,6 +344,12 @@ extern void _route_entry_dump(const char *func, union prefixconstptr pp,
union prefixconstptr src_pp,
const struct route_entry *re);
+struct route_entry *
+zebra_rib_route_entry_new(vrf_id_t vrf_id, int type, uint8_t instance,
+ uint32_t flags, uint32_t nhe_id, uint32_t table_id,
+ uint32_t metric, uint32_t mtu, uint8_t distance,
+ route_tag_t tag);
+
#define ZEBRA_RIB_LOOKUP_ERROR -1
#define ZEBRA_RIB_FOUND_EXACT 0
#define ZEBRA_RIB_FOUND_NOGATE 1
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index 0eab1fa850..e883033d59 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -937,44 +937,38 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id,
afi = AFI_IP6;
if (h->nlmsg_type == RTM_NEWROUTE) {
+ struct route_entry *re;
+ struct nexthop_group *ng = NULL;
+
+ re = zebra_rib_route_entry_new(vrf_id, proto, 0, flags, nhe_id,
+ table, metric, mtu, distance,
+ tag);
+ if (!nhe_id)
+ ng = nexthop_group_new();
if (!tb[RTA_MULTIPATH]) {
- struct nexthop nh = {0};
+ struct nexthop *nexthop, nh;
if (!nhe_id) {
nh = parse_nexthop_unicast(
ns_id, rtm, tb, bh_type, index, prefsrc,
gate, afi, vrf_id);
+
+ nexthop = nexthop_new();
+ *nexthop = nh;
+ nexthop_group_add_sorted(ng, nexthop);
}
- rib_add(afi, SAFI_UNICAST, vrf_id, proto, 0, flags, &p,
- &src_p, &nh, nhe_id, table, metric, mtu,
- distance, tag, startup);
} else {
/* This is a multipath route */
- struct route_entry *re;
- struct nexthop_group *ng = NULL;
struct rtnexthop *rtnh =
(struct rtnexthop *)RTA_DATA(tb[RTA_MULTIPATH]);
- re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
- re->type = proto;
- re->distance = distance;
- re->flags = flags;
- re->metric = metric;
- re->mtu = mtu;
- re->vrf_id = vrf_id;
- re->table = table;
- re->uptime = monotime(NULL);
- re->tag = tag;
- re->nhe_id = nhe_id;
-
if (!nhe_id) {
uint8_t nhop_num;
/* Use temporary list of nexthops; parse
* message payload's nexthops.
*/
- ng = nexthop_group_new();
nhop_num =
parse_multipath_nexthops_unicast(
ns_id, ng, rtm, rtnh, tb,
@@ -989,23 +983,22 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id,
ng = NULL;
}
}
-
- if (nhe_id || ng)
- rib_add_multipath(afi, SAFI_UNICAST, &p,
- &src_p, re, ng, startup);
- else {
- /*
- * I really don't see how this is possible
- * but since we are testing for it let's
- * let the end user know why the route
- * that was just received was swallowed
- * up and forgotten
- */
- zlog_err(
- "%s: %pFX multipath RTM_NEWROUTE has a invalid nexthop group from the kernel",
- __func__, &p);
- XFREE(MTYPE_RE, re);
- }
+ }
+ if (nhe_id || ng)
+ rib_add_multipath(afi, SAFI_UNICAST, &p, &src_p, re, ng,
+ startup);
+ else {
+ /*
+ * I really don't see how this is possible
+ * but since we are testing for it let's
+ * let the end user know why the route
+ * that was just received was swallowed
+ * up and forgotten
+ */
+ zlog_err(
+ "%s: %pFX multipath RTM_NEWROUTE has a invalid nexthop group from the kernel",
+ __func__, &p);
+ XFREE(MTYPE_RE, re);
}
} else {
if (nhe_id) {
diff --git a/zebra/tc_netlink.c b/zebra/tc_netlink.c
index 89ce075454..4fb0241d1d 100644
--- a/zebra/tc_netlink.c
+++ b/zebra/tc_netlink.c
@@ -294,7 +294,7 @@ static ssize_t netlink_tclass_msg_encode(int cmd, struct zebra_dplane_ctx *ctx,
htb_opt.cbuffer = cbuffer;
tc_calc_rate_table(&htb_opt.rate, rtab, mtu);
- tc_calc_rate_table(&htb_opt.ceil, rtab, mtu);
+ tc_calc_rate_table(&htb_opt.ceil, ctab, mtu);
htb_opt.ceil.mpu = htb_opt.rate.mpu = 0;
htb_opt.ceil.overhead = htb_opt.rate.overhead = 0;
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index a578395ef8..761ba789b8 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -2034,7 +2034,7 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
struct nhg_backup_info *bnhg = NULL;
int ret;
vrf_id_t vrf_id;
- struct nhg_hash_entry nhe;
+ struct nhg_hash_entry nhe, *n = NULL;
s = msg;
if (zapi_route_decode(s, &api) < 0) {
@@ -2052,17 +2052,10 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
(int)api.message, api.flags);
/* Allocate new route. */
- re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
- re->type = api.type;
- re->instance = api.instance;
- re->flags = api.flags;
- re->uptime = monotime(NULL);
- re->vrf_id = vrf_id;
-
- if (api.tableid)
- re->table = api.tableid;
- else
- re->table = zvrf->table_id;
+ re = zebra_rib_route_entry_new(
+ vrf_id, api.type, api.instance, api.flags, api.nhgid,
+ api.tableid ? api.tableid : zvrf->table_id, api.metric, api.mtu,
+ api.distance, api.tag);
if (!CHECK_FLAG(api.message, ZAPI_MESSAGE_NHG)
&& (!CHECK_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP)
@@ -2087,9 +2080,6 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
&api.prefix);
}
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_NHG))
- re->nhe_id = api.nhgid;
-
if (!re->nhe_id
&& (!zapi_read_nexthops(client, &api.prefix, api.nexthops,
api.flags, api.message, api.nexthop_num,
@@ -2105,15 +2095,6 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
return;
}
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_DISTANCE))
- re->distance = api.distance;
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_METRIC))
- re->metric = api.metric;
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_TAG))
- re->tag = api.tag;
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_MTU))
- re->mtu = api.mtu;
-
if (CHECK_FLAG(api.message, ZAPI_MESSAGE_OPAQUE)) {
re->opaque =
XMALLOC(MTYPE_RE_OPAQUE,
@@ -2161,9 +2142,10 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
zebra_nhe_init(&nhe, afi, ng->nexthop);
nhe.nhg.nexthop = ng->nexthop;
nhe.backup_info = bnhg;
+ n = zebra_nhe_copy(&nhe, 0);
}
- ret = rib_add_multipath_nhe(afi, api.safi, &api.prefix, src_p,
- re, &nhe, false);
+ ret = rib_add_multipath_nhe(afi, api.safi, &api.prefix, src_p, re, n,
+ false);
/*
* rib_add_multipath_nhe only fails in a couple spots
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 763c92ebb6..6a691a222f 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -2775,14 +2775,13 @@ int dplane_ctx_tc_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op)
{
int ret = EINVAL;
- struct zebra_vrf *zvrf = NULL;
struct zebra_ns *zns = NULL;
ctx->zd_op = op;
ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
/* TODO: init traffic control qdisc */
- zns = zvrf ? zvrf->zns : zebra_ns_lookup(NS_DEFAULT);
+ zns = zebra_ns_lookup(NS_DEFAULT);
dplane_ctx_ns_init(ctx, zns, true);
@@ -3513,7 +3512,7 @@ dplane_route_update_internal(struct route_node *rn,
static enum zebra_dplane_result dplane_tc_update_internal(enum dplane_op_e op)
{
enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
- int ret = EINVAL;
+ int ret;
struct zebra_dplane_ctx *ctx = NULL;
/* Obtain context block */
diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c
index 3010a516b9..9756d9ba08 100644
--- a/zebra/zebra_mpls.c
+++ b/zebra/zebra_mpls.c
@@ -2747,9 +2747,9 @@ static bool ftn_update_nexthop(bool add_p, struct nexthop *nexthop,
return true;
}
-void mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
- struct prefix *prefix, uint8_t route_type,
- unsigned short route_instance)
+void zebra_mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
+ struct prefix *prefix, uint8_t route_type,
+ uint8_t route_instance)
{
struct route_table *table;
struct route_node *rn;
@@ -2882,8 +2882,8 @@ static bool ftn_update_znh(bool add_p, enum lsp_types_t type,
* There are several changes that need to be made, in several zebra
* data structures, so we want to do all the work required at once.
*/
-void mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
- const struct zapi_labels *zl)
+void zebra_mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
+ const struct zapi_labels *zl)
{
int i, counter, ret = 0;
char buf[NEXTHOP_STRLEN];
diff --git a/zebra/zebra_mpls.h b/zebra/zebra_mpls.h
index a114f01339..cf247861f8 100644
--- a/zebra/zebra_mpls.h
+++ b/zebra/zebra_mpls.h
@@ -260,17 +260,30 @@ void zebra_mpls_print_fec(struct vty *vty, struct zebra_vrf *zvrf,
/*
* Handle zapi request to install/uninstall LSP and
* (optionally) FEC-To-NHLFE (FTN) bindings.
+ *
+ * mpls_zapi_labels_process -> enqueues the request for later
+ * processing on the meta-q
+ * zebra_mpls_zapi_labels_process -> called by the meta-q
*/
void mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
const struct zapi_labels *zl);
+void zebra_mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
+ const struct zapi_labels *zl);
/*
* Uninstall all NHLFEs bound to a single FEC.
+ *
+ * mpls_ftn_uninstall -> Called to enqueue into early label processing
+ * via the metaq
+ * zebra_mpls_ftn_uninstall -> Called when we process the meta q
+ * for this item
*/
void mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
struct prefix *prefix, uint8_t route_type,
- unsigned short route_instance);
-
+ uint8_t route_instance);
+void zebra_mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
+ struct prefix *prefix, uint8_t route_type,
+ uint8_t route_instance);
/*
* Install/update a NHLFE for an LSP in the forwarding table. This may be
* a new LSP entry or a new NHLFE for an existing in-label or an update of
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 03bda8cc33..bd7e8bbbd0 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -81,6 +81,8 @@ DEFINE_HOOK(rib_update, (struct route_node * rn, const char *reason),
enum meta_queue_indexes {
META_QUEUE_NHG,
META_QUEUE_EVPN,
+ META_QUEUE_EARLY_ROUTE,
+ META_QUEUE_EARLY_LABEL,
META_QUEUE_CONNECTED,
META_QUEUE_KERNEL,
META_QUEUE_STATIC,
@@ -173,6 +175,29 @@ struct wq_evpn_wrapper {
#define WQ_EVPN_WRAPPER_TYPE_REM_MACIP 0x03
#define WQ_EVPN_WRAPPER_TYPE_REM_VTEP 0x04
+enum wq_label_types {
+ WQ_LABEL_FTN_UNINSTALL,
+ WQ_LABEL_LABELS_PROCESS,
+};
+
+struct wq_label_wrapper {
+ enum wq_label_types type;
+ vrf_id_t vrf_id;
+
+ struct prefix p;
+ enum lsp_types_t ltype;
+ uint8_t route_type;
+ uint8_t route_instance;
+
+ bool add_p;
+ struct zapi_labels zl;
+
+ int afi;
+};
+
+static void rib_addnode(struct route_node *rn, struct route_entry *re,
+ int process);
+
/* %pRN is already a printer for route_nodes that just prints the prefix */
#ifdef _FRR_ATTRIBUTE_PRINTFRR
#pragma FRR printfrr_ext "%pZN" (struct route_node *)
@@ -185,6 +210,10 @@ static const char *subqueue2str(enum meta_queue_indexes index)
return "NHG Objects";
case META_QUEUE_EVPN:
return "EVPN/VxLan Objects";
+ case META_QUEUE_EARLY_ROUTE:
+ return "Early Route Processing";
+ case META_QUEUE_EARLY_LABEL:
+ return "Early Label Handling";
case META_QUEUE_CONNECTED:
return "Connected Routes";
case META_QUEUE_KERNEL:
@@ -2468,6 +2497,33 @@ static void process_subq_nhg(struct listnode *lnode)
XFREE(MTYPE_WQ_WRAPPER, w);
}
+static void process_subq_early_label(struct listnode *lnode)
+{
+ struct wq_label_wrapper *w = listgetdata(lnode);
+ struct zebra_vrf *zvrf;
+
+ if (!w)
+ return;
+
+ zvrf = vrf_info_lookup(w->vrf_id);
+ if (!zvrf) {
+ XFREE(MTYPE_WQ_WRAPPER, w);
+ return;
+ }
+
+ switch (w->type) {
+ case WQ_LABEL_FTN_UNINSTALL:
+ zebra_mpls_ftn_uninstall(zvrf, w->ltype, &w->p, w->route_type,
+ w->route_instance);
+ break;
+ case WQ_LABEL_LABELS_PROCESS:
+ zebra_mpls_zapi_labels_process(w->add_p, zvrf, &w->zl);
+ break;
+ }
+
+ XFREE(MTYPE_WQ_WRAPPER, w);
+}
+
static void process_subq_route(struct listnode *lnode, uint8_t qindex)
{
struct route_node *rnode = NULL;
@@ -2506,6 +2562,460 @@ static void process_subq_route(struct listnode *lnode, uint8_t qindex)
route_unlock_node(rnode);
}
+static void rib_re_nhg_free(struct route_entry *re)
+{
+ if (re->nhe && re->nhe_id) {
+ assert(re->nhe->id == re->nhe_id);
+ route_entry_update_nhe(re, NULL);
+ } else if (re->nhe && re->nhe->nhg.nexthop)
+ nexthops_free(re->nhe->nhg.nexthop);
+
+ nexthops_free(re->fib_ng.nexthop);
+}
+
+struct zebra_early_route {
+ afi_t afi;
+ safi_t safi;
+ struct prefix p;
+ struct prefix_ipv6 src_p;
+ bool src_p_provided;
+ struct route_entry *re;
+ struct nhg_hash_entry *re_nhe;
+ bool startup;
+ bool deletion;
+ bool fromkernel;
+};
+
+static void early_route_memory_free(struct zebra_early_route *ere)
+{
+ if (ere->re_nhe)
+ zebra_nhg_free(ere->re_nhe);
+
+ XFREE(MTYPE_RE, ere->re);
+ XFREE(MTYPE_WQ_WRAPPER, ere);
+}
+
+static void process_subq_early_route_add(struct zebra_early_route *ere)
+{
+ struct route_entry *re = ere->re;
+ struct route_table *table;
+ struct nhg_hash_entry *nhe = NULL;
+ struct route_node *rn;
+ struct route_entry *same = NULL, *first_same = NULL;
+ int same_count = 0;
+ rib_dest_t *dest;
+
+ /* Lookup table. */
+ table = zebra_vrf_get_table_with_table_id(ere->afi, ere->safi,
+ re->vrf_id, re->table);
+ if (!table) {
+ early_route_memory_free(ere);
+ return;
+ }
+
+ if (re->nhe_id > 0) {
+ nhe = zebra_nhg_lookup_id(re->nhe_id);
+
+ if (!nhe) {
+ /*
+ * We've received from the kernel a nexthop id
+ * that we don't have saved yet. More than likely
+ * it has not been processed and is on the
+ * queue to be processed. Let's stop what we
+ * are doing and cause the meta q to be processed
+ * storing this for later.
+ *
+ * This is being done this way because zebra
+			 * runs with the assumption that nexthop groups are fully processed before the routes that use them.
+ */
+ flog_err(
+ EC_ZEBRA_TABLE_LOOKUP_FAILED,
+ "Zebra failed to find the nexthop hash entry for id=%u in a route entry %pFX",
+ re->nhe_id, &ere->p);
+
+ early_route_memory_free(ere);
+ return;
+ }
+ } else {
+ /* Lookup nhe from route information */
+ nhe = zebra_nhg_rib_find_nhe(ere->re_nhe, ere->afi);
+ if (!nhe) {
+ char buf2[PREFIX_STRLEN] = "";
+
+ flog_err(
+ EC_ZEBRA_TABLE_LOOKUP_FAILED,
+ "Zebra failed to find or create a nexthop hash entry for %pFX%s%s",
+ &ere->p, ere->src_p_provided ? " from " : "",
+ ere->src_p_provided
+ ? prefix2str(&ere->src_p, buf2,
+ sizeof(buf2))
+ : "");
+
+ early_route_memory_free(ere);
+ return;
+ }
+ }
+
+ /*
+ * Attach the re to the nhe's nexthop group.
+ *
+ * TODO: This will need to change when we start getting IDs from upper
+ * level protocols, as the refcnt might be wrong, since it checks
+ * if old_id != new_id.
+ */
+ route_entry_update_nhe(re, nhe);
+
+ /* Make it sure prefixlen is applied to the prefix. */
+ apply_mask(&ere->p);
+ if (ere->src_p_provided)
+ apply_mask_ipv6(&ere->src_p);
+
+ /* Set default distance by route type. */
+ if (re->distance == 0)
+ re->distance = route_distance(re->type);
+
+ /* Lookup route node.*/
+ rn = srcdest_rnode_get(table, &ere->p,
+ ere->src_p_provided ? &ere->src_p : NULL);
+
+ /*
+ * If same type of route are installed, treat it as a implicit
+ * withdraw. If the user has specified the No route replace semantics
+ * for the install don't do a route replace.
+ */
+ RNODE_FOREACH_RE (rn, same) {
+ if (CHECK_FLAG(same->status, ROUTE_ENTRY_REMOVED)) {
+ same_count++;
+ continue;
+ }
+
+ /* Compare various route_entry properties */
+ if (rib_compare_routes(re, same)) {
+ same_count++;
+
+ if (first_same == NULL)
+ first_same = same;
+ }
+ }
+
+ same = first_same;
+
+ if (!ere->startup && (re->flags & ZEBRA_FLAG_SELFROUTE) &&
+ zrouter.asic_offloaded) {
+ if (!same) {
+ if (IS_ZEBRA_DEBUG_RIB)
+ zlog_debug(
+ "prefix: %pRN is a self route where we do not have an entry for it. Dropping this update, it's useless",
+ rn);
+ /*
+ * We are not on startup, this is a self route
+ * and we have asic offload. Which means
+ * we are getting a callback for a entry
+ * that was already deleted to the kernel
+ * but an earlier response was just handed
+ * back. Drop it on the floor
+ */
+ early_route_memory_free(ere);
+ return;
+ }
+ }
+
+ /* If this route is kernel/connected route, notify the dataplane. */
+ if (RIB_SYSTEM_ROUTE(re)) {
+ /* Notify dataplane */
+ dplane_sys_route_add(rn, re);
+ }
+
+ /* Link new re to node.*/
+ if (IS_ZEBRA_DEBUG_RIB) {
+ rnode_debug(
+ rn, re->vrf_id,
+ "Inserting route rn %p, re %p (%s) existing %p, same_count %d",
+ rn, re, zebra_route_string(re->type), same, same_count);
+
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ route_entry_dump(
+ &ere->p,
+ ere->src_p_provided ? &ere->src_p : NULL, re);
+ }
+
+ SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
+ rib_addnode(rn, re, 1);
+
+ /* Free implicit route.*/
+ if (same)
+ rib_delnode(rn, same);
+
+ /* See if we can remove some RE entries that are queued for
+ * removal, but won't be considered in rib processing.
+ */
+ dest = rib_dest_from_rnode(rn);
+ RNODE_FOREACH_RE_SAFE (rn, re, same) {
+ if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED)) {
+ /* If the route was used earlier, must retain it. */
+ if (dest && re == dest->selected_fib)
+ continue;
+
+ if (IS_ZEBRA_DEBUG_RIB)
+ rnode_debug(rn, re->vrf_id,
+ "rn %p, removing unneeded re %p",
+ rn, re);
+
+ rib_unlink(rn, re);
+ }
+ }
+
+ route_unlock_node(rn);
+ if (ere->re_nhe)
+ zebra_nhg_free(ere->re_nhe);
+ XFREE(MTYPE_WQ_WRAPPER, ere);
+}
+
+static void process_subq_early_route_delete(struct zebra_early_route *ere)
+{
+ struct route_table *table;
+ struct route_node *rn;
+ struct route_entry *re;
+ struct route_entry *fib = NULL;
+ struct route_entry *same = NULL;
+ struct nexthop *rtnh;
+ char buf2[INET6_ADDRSTRLEN];
+ rib_dest_t *dest;
+
+ if (ere->src_p_provided)
+ assert(!ere->src_p.prefixlen || ere->afi == AFI_IP6);
+
+ /* Lookup table. */
+ table = zebra_vrf_lookup_table_with_table_id(
+ ere->afi, ere->safi, ere->re->vrf_id, ere->re->table);
+ if (!table) {
+ early_route_memory_free(ere);
+ return;
+ }
+
+ /* Apply mask. */
+ apply_mask(&ere->p);
+ if (ere->src_p_provided)
+ apply_mask_ipv6(&ere->src_p);
+
+ /* Lookup route node. */
+ rn = srcdest_rnode_lookup(table, &ere->p,
+ ere->src_p_provided ? &ere->src_p : NULL);
+ if (!rn) {
+ if (IS_ZEBRA_DEBUG_RIB) {
+ char src_buf[PREFIX_STRLEN];
+ struct vrf *vrf = vrf_lookup_by_id(ere->re->vrf_id);
+
+ if (ere->src_p_provided && ere->src_p.prefixlen)
+ prefix2str(&ere->src_p, src_buf,
+ sizeof(src_buf));
+ else
+ src_buf[0] = '\0';
+
+ zlog_debug("%s[%d]:%pRN%s%s doesn't exist in rib",
+ vrf->name, ere->re->table, rn,
+ (src_buf[0] != '\0') ? " from " : "",
+ src_buf);
+ }
+ early_route_memory_free(ere);
+ return;
+ }
+
+ dest = rib_dest_from_rnode(rn);
+ fib = dest->selected_fib;
+
+ struct nexthop *nh = NULL;
+
+ if (ere->re->nhe)
+ nh = ere->re->nhe->nhg.nexthop;
+
+ /* Lookup same type route. */
+ RNODE_FOREACH_RE (rn, re) {
+ if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED))
+ continue;
+
+ if (re->type != ere->re->type)
+ continue;
+ if (re->instance != ere->re->instance)
+ continue;
+ if (CHECK_FLAG(re->flags, ZEBRA_FLAG_RR_USE_DISTANCE) &&
+ ere->re->distance != re->distance)
+ continue;
+
+ if (re->type == ZEBRA_ROUTE_KERNEL &&
+ re->metric != ere->re->metric)
+ continue;
+ if (re->type == ZEBRA_ROUTE_CONNECT && (rtnh = nh) &&
+ rtnh->type == NEXTHOP_TYPE_IFINDEX && nh) {
+ if (rtnh->ifindex != nh->ifindex)
+ continue;
+ same = re;
+ break;
+ }
+
+ /* Make sure that the route found has the same gateway. */
+ if (ere->re->nhe_id && re->nhe_id == ere->re->nhe_id) {
+ same = re;
+ break;
+ }
+
+ if (nh == NULL) {
+ same = re;
+ break;
+ }
+ for (ALL_NEXTHOPS(re->nhe->nhg, rtnh)) {
+ /*
+ * No guarantee all kernel send nh with labels
+ * on delete.
+ */
+ if (nexthop_same_no_labels(rtnh, nh)) {
+ same = re;
+ break;
+ }
+ }
+
+ if (same)
+ break;
+ }
+ /*
+ * If a route of the same type can't be found and this message
+ * is from the kernel.
+ */
+ if (!same) {
+ /*
+ * In the past(HA!) we could get here because
+ * we were receiving a route delete from the
+ * kernel and we're not marking the proto
+ * as coming from its appropriate originator.
+ * Now that we are properly noticing the fact
+ * that the kernel has deleted our route we
+ * are not going to get called in this path
+ * I am going to leave this here because
+ * this might still work this way on non-linux
+ * platforms as well as some weird state I have
+ * not properly thought of yet.
+ * If we can show that this code path is
+ * dead then we can remove it.
+ */
+ if (fib && CHECK_FLAG(ere->re->flags, ZEBRA_FLAG_SELFROUTE)) {
+ if (IS_ZEBRA_DEBUG_RIB) {
+ rnode_debug(
+ rn, ere->re->vrf_id,
+ "rn %p, re %p (%s) was deleted from kernel, adding",
+ rn, fib, zebra_route_string(fib->type));
+ }
+ if (zrouter.allow_delete ||
+ CHECK_FLAG(dest->flags, RIB_ROUTE_ANY_QUEUED)) {
+ UNSET_FLAG(fib->status, ROUTE_ENTRY_INSTALLED);
+ /* Unset flags. */
+ for (rtnh = fib->nhe->nhg.nexthop; rtnh;
+ rtnh = rtnh->next)
+ UNSET_FLAG(rtnh->flags,
+ NEXTHOP_FLAG_FIB);
+
+ /*
+ * This is a non-FRR route;
+ * as such we should mark
+ * it as deleted
+ */
+ dest->selected_fib = NULL;
+ } else {
+ /*
+ * This means someone else, other than Zebra,
+ * has deleted a Zebra router from the kernel.
+ * We will add it back
+ */
+ rib_install_kernel(rn, fib, NULL);
+ }
+ } else {
+ if (IS_ZEBRA_DEBUG_RIB) {
+ if (nh)
+ rnode_debug(
+ rn, ere->re->vrf_id,
+ "via %s ifindex %d type %d doesn't exist in rib",
+ inet_ntop(afi2family(ere->afi),
+ &nh->gate, buf2,
+ sizeof(buf2)),
+ nh->ifindex, ere->re->type);
+ else
+ rnode_debug(
+ rn, ere->re->vrf_id,
+ "type %d doesn't exist in rib",
+ ere->re->type);
+ }
+ route_unlock_node(rn);
+ early_route_memory_free(ere);
+ return;
+ }
+ }
+
+ if (same) {
+ struct nexthop *tmp_nh;
+
+ if (ere->fromkernel &&
+ CHECK_FLAG(ere->re->flags, ZEBRA_FLAG_SELFROUTE) &&
+ !zrouter.allow_delete) {
+ rib_install_kernel(rn, same, NULL);
+ route_unlock_node(rn);
+
+ early_route_memory_free(ere);
+ return;
+ }
+
+ /* Special handling for IPv4 or IPv6 routes sourced from
+ * EVPN - the nexthop (and associated MAC) need to be
+ * uninstalled if no more refs.
+ */
+ for (ALL_NEXTHOPS(re->nhe->nhg, tmp_nh)) {
+ struct ipaddr vtep_ip;
+
+ if (CHECK_FLAG(tmp_nh->flags, NEXTHOP_FLAG_EVPN)) {
+ memset(&vtep_ip, 0, sizeof(struct ipaddr));
+ if (ere->afi == AFI_IP) {
+ vtep_ip.ipa_type = IPADDR_V4;
+ memcpy(&(vtep_ip.ipaddr_v4),
+ &(tmp_nh->gate.ipv4),
+ sizeof(struct in_addr));
+ } else {
+ vtep_ip.ipa_type = IPADDR_V6;
+ memcpy(&(vtep_ip.ipaddr_v6),
+ &(tmp_nh->gate.ipv6),
+ sizeof(struct in6_addr));
+ }
+ zebra_rib_queue_evpn_route_del(
+ re->vrf_id, &vtep_ip, &ere->p);
+ }
+ }
+
+ /* Notify dplane if system route changes */
+ if (RIB_SYSTEM_ROUTE(re))
+ dplane_sys_route_del(rn, same);
+
+ rib_delnode(rn, same);
+ }
+
+ route_unlock_node(rn);
+
+ early_route_memory_free(ere);
+}
+
+/*
+ * When FRR receives a route, we need to match it up to the
+ * nexthop groups that we may also have just received. Place
+ * the data on this queue so that the work of finding the
+ * nexthop group entries for the route entry is always done
+ * after the nexthop group has had a chance to be processed.
+ */
+static void process_subq_early_route(struct listnode *lnode)
+{
+ struct zebra_early_route *ere = listgetdata(lnode);
+
+ if (ere->deletion)
+ process_subq_early_route_delete(ere);
+ else
+ process_subq_early_route_add(ere);
+}
+
/*
* Examine the specified subqueue; process one entry and return 1 if
* there is a node, return 0 otherwise.
@@ -2525,6 +3035,12 @@ static unsigned int process_subq(struct list *subq,
case META_QUEUE_NHG:
process_subq_nhg(lnode);
break;
+ case META_QUEUE_EARLY_ROUTE:
+ process_subq_early_route(lnode);
+ break;
+ case META_QUEUE_EARLY_LABEL:
+ process_subq_early_label(lnode);
+ break;
case META_QUEUE_CONNECTED:
case META_QUEUE_KERNEL:
case META_QUEUE_STATIC:
@@ -2555,8 +3071,9 @@ static wq_item_status meta_queue_process(struct work_queue *dummy, void *data)
queue_len = dplane_get_in_queue_len();
if (queue_len > queue_limit) {
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug("rib queue: dplane queue len %u, limit %u, retrying",
- queue_len, queue_limit);
+ zlog_debug(
+ "rib queue: dplane queue len %u, limit %u, retrying",
+ queue_len, queue_limit);
/* Ensure that the meta-queue is actually enqueued */
if (work_queue_empty(zrouter.ribq))
@@ -2635,6 +3152,13 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
return 0;
}
+static int early_label_meta_queue_add(struct meta_queue *mq, void *data)
+{
+ listnode_add(mq->subq[META_QUEUE_EARLY_LABEL], data);
+ mq->size++;
+ return 0;
+}
+
static int rib_meta_queue_nhg_ctx_add(struct meta_queue *mq, void *data)
{
struct nhg_ctx *ctx = NULL;
@@ -2718,6 +3242,44 @@ static int mq_add_handler(void *data,
return mq_add_func(zrouter.mq, data);
}
+void mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
+ struct prefix *prefix, uint8_t route_type,
+ uint8_t route_instance)
+{
+ struct wq_label_wrapper *w;
+
+ w = XCALLOC(MTYPE_WQ_WRAPPER, sizeof(struct wq_label_wrapper));
+
+ w->type = WQ_LABEL_FTN_UNINSTALL;
+ w->vrf_id = zvrf->vrf->vrf_id;
+ w->p = *prefix;
+ w->ltype = type;
+ w->route_type = route_type;
+ w->route_instance = route_instance;
+
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug("Early Label Handling for %pFX", prefix);
+
+ mq_add_handler(w, early_label_meta_queue_add);
+}
+
+void mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
+ const struct zapi_labels *zl)
+{
+ struct wq_label_wrapper *w;
+
+ w = XCALLOC(MTYPE_WQ_WRAPPER, sizeof(struct wq_label_wrapper));
+ w->type = WQ_LABEL_LABELS_PROCESS;
+ w->vrf_id = zvrf->vrf->vrf_id;
+ w->add_p = add_p;
+ w->zl = *zl;
+
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug("Early Label Handling: Labels Process");
+
+ mq_add_handler(w, early_label_meta_queue_add);
+}
+
/* Add route_node to work queue and schedule processing */
int rib_queue_add(struct route_node *rn)
{
@@ -2958,7 +3520,6 @@ int zebra_rib_queue_evpn_rem_vtep_del(vrf_id_t vrf_id, vni_t vni,
return mq_add_handler(w, rib_meta_queue_evpn_add);
}
-
/* Create new meta queue.
A destructor function doesn't seem to be necessary here.
*/
@@ -3034,6 +3595,29 @@ static void nhg_meta_queue_free(struct meta_queue *mq, struct list *l,
}
}
+static void early_label_meta_queue_free(struct meta_queue *mq, struct list *l,
+ struct zebra_vrf *zvrf)
+{
+ struct wq_label_wrapper *w;
+ struct listnode *node, *nnode;
+
+ for (ALL_LIST_ELEMENTS(l, node, nnode, w)) {
+ if (zvrf && zvrf->vrf->vrf_id != w->vrf_id)
+ continue;
+
+ switch (w->type) {
+ case WQ_LABEL_FTN_UNINSTALL:
+ case WQ_LABEL_LABELS_PROCESS:
+ break;
+ }
+
+ node->data = NULL;
+ XFREE(MTYPE_WQ_WRAPPER, w);
+ list_delete_node(l, node);
+ mq->size--;
+ }
+}
+
static void rib_meta_queue_free(struct meta_queue *mq, struct list *l,
struct zebra_vrf *zvrf)
{
@@ -3053,6 +3637,22 @@ static void rib_meta_queue_free(struct meta_queue *mq, struct list *l,
}
}
+static void early_route_meta_queue_free(struct meta_queue *mq, struct list *l,
+ struct zebra_vrf *zvrf)
+{
+ struct zebra_early_route *zer;
+ struct listnode *node, *nnode;
+
+ for (ALL_LIST_ELEMENTS(l, node, nnode, zer)) {
+ if (zvrf && zer->re->vrf_id != zvrf->vrf->vrf_id)
+ continue;
+
+ XFREE(MTYPE_RE, zer);
+ node->data = NULL;
+ list_delete_node(l, node);
+ mq->size--;
+ }
+}
void meta_queue_free(struct meta_queue *mq, struct zebra_vrf *zvrf)
{
@@ -3067,6 +3667,12 @@ void meta_queue_free(struct meta_queue *mq, struct zebra_vrf *zvrf)
case META_QUEUE_EVPN:
evpn_meta_queue_free(mq, mq->subq[i], zvrf);
break;
+ case META_QUEUE_EARLY_ROUTE:
+ early_route_meta_queue_free(mq, mq->subq[i], zvrf);
+ break;
+ case META_QUEUE_EARLY_LABEL:
+ early_label_meta_queue_free(mq, mq->subq[i], zvrf);
+ break;
case META_QUEUE_CONNECTED:
case META_QUEUE_KERNEL:
case META_QUEUE_STATIC:
@@ -3210,17 +3816,6 @@ static void rib_addnode(struct route_node *rn,
rib_link(rn, re, process);
}
-static void rib_re_nhg_free(struct route_entry *re)
-{
- if (re->nhe && re->nhe_id) {
- assert(re->nhe->id == re->nhe_id);
- route_entry_update_nhe(re, NULL);
- } else if (re->nhe && re->nhe->nhg.nexthop)
- nexthops_free(re->nhe->nhg.nexthop);
-
- nexthops_free(re->fib_ng.nexthop);
-}
-
/*
* rib_unlink
*
@@ -3426,6 +4021,46 @@ void _route_entry_dump(const char *func, union prefixconstptr pp,
zlog_debug("%s: dump complete", straddr);
}
+static int rib_meta_queue_early_route_add(struct meta_queue *mq, void *data)
+{
+ struct zebra_early_route *ere = data;
+
+ listnode_add(mq->subq[META_QUEUE_EARLY_ROUTE], data);
+ mq->size++;
+
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug(
+ "Route %pFX(%u) queued for processing into sub-queue %s",
+ &ere->p, ere->re->vrf_id,
+ subqueue2str(META_QUEUE_EARLY_ROUTE));
+
+ return 0;
+}
+
+struct route_entry *zebra_rib_route_entry_new(vrf_id_t vrf_id, int type,
+ uint8_t instance, uint32_t flags,
+ uint32_t nhe_id,
+ uint32_t table_id,
+ uint32_t metric, uint32_t mtu,
+ uint8_t distance, route_tag_t tag)
+{
+ struct route_entry *re;
+
+ re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
+ re->type = type;
+ re->instance = instance;
+ re->distance = distance;
+ re->flags = flags;
+ re->metric = metric;
+ re->mtu = mtu;
+ re->table = table_id;
+ re->vrf_id = vrf_id;
+ re->uptime = monotime(NULL);
+ re->tag = tag;
+ re->nhe_id = nhe_id;
+
+ return re;
+}
/*
* Internal route-add implementation; there are a couple of different public
* signatures. Callers in this path are responsible for the memory they
@@ -3441,162 +4076,25 @@ int rib_add_multipath_nhe(afi_t afi, safi_t safi, struct prefix *p,
struct prefix_ipv6 *src_p, struct route_entry *re,
struct nhg_hash_entry *re_nhe, bool startup)
{
- struct nhg_hash_entry *nhe = NULL;
- struct route_table *table;
- struct route_node *rn;
- struct route_entry *same = NULL, *first_same = NULL;
- int ret = 0;
- int same_count = 0;
- rib_dest_t *dest;
+ struct zebra_early_route *ere;
- if (!re || !re_nhe)
+ if (!re)
return -1;
assert(!src_p || !src_p->prefixlen || afi == AFI_IP6);
- /* Lookup table. */
- table = zebra_vrf_get_table_with_table_id(afi, safi, re->vrf_id,
- re->table);
- if (!table)
- return -1;
-
- if (re->nhe_id > 0) {
- nhe = zebra_nhg_lookup_id(re->nhe_id);
-
- if (!nhe) {
- flog_err(
- EC_ZEBRA_TABLE_LOOKUP_FAILED,
- "Zebra failed to find the nexthop hash entry for id=%u in a route entry",
- re->nhe_id);
-
- return -1;
- }
- } else {
- /* Lookup nhe from route information */
- nhe = zebra_nhg_rib_find_nhe(re_nhe, afi);
- if (!nhe) {
- char buf2[PREFIX_STRLEN] = "";
-
- flog_err(
- EC_ZEBRA_TABLE_LOOKUP_FAILED,
- "Zebra failed to find or create a nexthop hash entry for %pFX%s%s",
- p, src_p ? " from " : "",
- src_p ? prefix2str(src_p, buf2, sizeof(buf2))
- : "");
-
- return -1;
- }
- }
-
- /*
- * Attach the re to the nhe's nexthop group.
- *
- * TODO: This will need to change when we start getting IDs from upper
- * level protocols, as the refcnt might be wrong, since it checks
- * if old_id != new_id.
- */
- route_entry_update_nhe(re, nhe);
-
- /* Make it sure prefixlen is applied to the prefix. */
- apply_mask(p);
+ ere = XCALLOC(MTYPE_WQ_WRAPPER, sizeof(*ere));
+ ere->afi = afi;
+ ere->safi = safi;
+ ere->p = *p;
if (src_p)
- apply_mask_ipv6(src_p);
-
- /* Set default distance by route type. */
- if (re->distance == 0)
- re->distance = route_distance(re->type);
-
- /* Lookup route node.*/
- rn = srcdest_rnode_get(table, p, src_p);
-
- /*
- * If same type of route are installed, treat it as a implicit
- * withdraw. If the user has specified the No route replace semantics
- * for the install don't do a route replace.
- */
- RNODE_FOREACH_RE (rn, same) {
- if (CHECK_FLAG(same->status, ROUTE_ENTRY_REMOVED)) {
- same_count++;
- continue;
- }
-
- /* Compare various route_entry properties */
- if (rib_compare_routes(re, same)) {
- same_count++;
-
- if (first_same == NULL)
- first_same = same;
- }
- }
-
- same = first_same;
-
- if (!startup &&
- (re->flags & ZEBRA_FLAG_SELFROUTE) && zrouter.asic_offloaded) {
- if (!same) {
- if (IS_ZEBRA_DEBUG_RIB)
- zlog_debug("prefix: %pRN is a self route where we do not have an entry for it. Dropping this update, it's useless", rn);
- /*
- * We are not on startup, this is a self route
- * and we have asic offload. Which means
- * we are getting a callback for a entry
- * that was already deleted to the kernel
- * but an earlier response was just handed
- * back. Drop it on the floor
- */
- rib_re_nhg_free(re);
-
- XFREE(MTYPE_RE, re);
- return ret;
- }
- }
-
- /* If this route is kernel/connected route, notify the dataplane. */
- if (RIB_SYSTEM_ROUTE(re)) {
- /* Notify dataplane */
- dplane_sys_route_add(rn, re);
- }
-
- /* Link new re to node.*/
- if (IS_ZEBRA_DEBUG_RIB) {
- rnode_debug(rn, re->vrf_id,
- "Inserting route rn %p, re %p (%s) existing %p, same_count %d",
- rn, re, zebra_route_string(re->type), same,
- same_count);
-
- if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- route_entry_dump(p, src_p, re);
- }
-
- SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
- rib_addnode(rn, re, 1);
-
- /* Free implicit route.*/
- if (same) {
- ret = 1;
- rib_delnode(rn, same);
- }
-
- /* See if we can remove some RE entries that are queued for
- * removal, but won't be considered in rib processing.
- */
- dest = rib_dest_from_rnode(rn);
- RNODE_FOREACH_RE_SAFE (rn, re, same) {
- if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED)) {
- /* If the route was used earlier, must retain it. */
- if (dest && re == dest->selected_fib)
- continue;
-
- if (IS_ZEBRA_DEBUG_RIB)
- rnode_debug(rn, re->vrf_id, "rn %p, removing unneeded re %p",
- rn, re);
+ ere->src_p = *src_p;
+ ere->src_p_provided = !!src_p;
+ ere->re = re;
+ ere->re_nhe = re_nhe;
+ ere->startup = startup;
- rib_unlink(rn, re);
- }
- }
-
- route_unlock_node(rn);
- return ret;
+ return mq_add_handler(ere, rib_meta_queue_early_route_add);
}
/*
@@ -3607,7 +4105,7 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
struct nexthop_group *ng, bool startup)
{
int ret;
- struct nhg_hash_entry nhe;
+ struct nhg_hash_entry nhe, *n;
if (!re)
return -1;
@@ -3625,10 +4123,8 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
else if (re->nhe_id > 0)
nhe.id = re->nhe_id;
- ret = rib_add_multipath_nhe(afi, safi, p, src_p, re, &nhe, startup);
-
- /* In this path, the callers expect memory to be freed. */
- nexthop_group_delete(&ng);
+ n = zebra_nhe_copy(&nhe, 0);
+ ret = rib_add_multipath_nhe(afi, safi, p, src_p, re, n, startup);
/* In error cases, free the route also */
if (ret < 0)
@@ -3643,212 +4139,32 @@ void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
uint32_t nhe_id, uint32_t table_id, uint32_t metric,
uint8_t distance, bool fromkernel)
{
- struct route_table *table;
- struct route_node *rn;
- struct route_entry *re;
- struct route_entry *fib = NULL;
- struct route_entry *same = NULL;
- struct nexthop *rtnh;
- char buf2[INET6_ADDRSTRLEN];
- rib_dest_t *dest;
-
- assert(!src_p || !src_p->prefixlen || afi == AFI_IP6);
-
- /* Lookup table. */
- table = zebra_vrf_lookup_table_with_table_id(afi, safi, vrf_id,
- table_id);
- if (!table)
- return;
-
- /* Apply mask. */
- apply_mask(p);
- if (src_p)
- apply_mask_ipv6(src_p);
-
- /* Lookup route node. */
- rn = srcdest_rnode_lookup(table, p, src_p);
- if (!rn) {
- if (IS_ZEBRA_DEBUG_RIB) {
- char src_buf[PREFIX_STRLEN];
- struct vrf *vrf = vrf_lookup_by_id(vrf_id);
-
- if (src_p && src_p->prefixlen)
- prefix2str(src_p, src_buf, sizeof(src_buf));
- else
- src_buf[0] = '\0';
-
- zlog_debug("%s[%d]:%pRN%s%s doesn't exist in rib",
- vrf->name, table_id, rn,
- (src_buf[0] != '\0') ? " from " : "",
- src_buf);
- }
- return;
- }
-
- dest = rib_dest_from_rnode(rn);
- fib = dest->selected_fib;
-
- /* Lookup same type route. */
- RNODE_FOREACH_RE (rn, re) {
- if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED))
- continue;
-
- if (re->type != type)
- continue;
- if (re->instance != instance)
- continue;
- if (CHECK_FLAG(re->flags, ZEBRA_FLAG_RR_USE_DISTANCE) &&
- distance != re->distance)
- continue;
-
- if (re->type == ZEBRA_ROUTE_KERNEL && re->metric != metric)
- continue;
- if (re->type == ZEBRA_ROUTE_CONNECT &&
- (rtnh = re->nhe->nhg.nexthop)
- && rtnh->type == NEXTHOP_TYPE_IFINDEX && nh) {
- if (rtnh->ifindex != nh->ifindex)
- continue;
- same = re;
- break;
- }
-
- /* Make sure that the route found has the same gateway. */
- if (nhe_id && re->nhe_id == nhe_id) {
- same = re;
- break;
- }
-
- if (nh == NULL) {
- same = re;
- break;
- }
- for (ALL_NEXTHOPS(re->nhe->nhg, rtnh)) {
- /*
- * No guarantee all kernel send nh with labels
- * on delete.
- */
- if (nexthop_same_no_labels(rtnh, nh)) {
- same = re;
- break;
- }
- }
-
- if (same)
- break;
- }
- /* If same type of route can't be found and this message is from
- kernel. */
- if (!same) {
- /*
- * In the past(HA!) we could get here because
- * we were receiving a route delete from the
- * kernel and we're not marking the proto
- * as coming from it's appropriate originator.
- * Now that we are properly noticing the fact
- * that the kernel has deleted our route we
- * are not going to get called in this path
- * I am going to leave this here because
- * this might still work this way on non-linux
- * platforms as well as some weird state I have
- * not properly thought of yet.
- * If we can show that this code path is
- * dead then we can remove it.
- */
- if (fib && CHECK_FLAG(flags, ZEBRA_FLAG_SELFROUTE)) {
- if (IS_ZEBRA_DEBUG_RIB) {
- rnode_debug(rn, vrf_id,
- "rn %p, re %p (%s) was deleted from kernel, adding",
- rn, fib,
- zebra_route_string(fib->type));
- }
- if (zrouter.allow_delete ||
- CHECK_FLAG(dest->flags, RIB_ROUTE_ANY_QUEUED)) {
- UNSET_FLAG(fib->status, ROUTE_ENTRY_INSTALLED);
- /* Unset flags. */
- for (rtnh = fib->nhe->nhg.nexthop; rtnh;
- rtnh = rtnh->next)
- UNSET_FLAG(rtnh->flags,
- NEXTHOP_FLAG_FIB);
-
- /*
- * This is a non FRR route
- * as such we should mark
- * it as deleted
- */
- dest->selected_fib = NULL;
- } else {
- /* This means someone else, other than Zebra,
- * has deleted
- * a Zebra router from the kernel. We will add
- * it back */
- rib_install_kernel(rn, fib, NULL);
- }
- } else {
- if (IS_ZEBRA_DEBUG_RIB) {
- if (nh)
- rnode_debug(
- rn, vrf_id,
- "via %s ifindex %d type %d doesn't exist in rib",
- inet_ntop(afi2family(afi),
- &nh->gate, buf2,
- sizeof(buf2)),
- nh->ifindex, type);
- else
- rnode_debug(
- rn, vrf_id,
- "type %d doesn't exist in rib",
- type);
- }
- route_unlock_node(rn);
- return;
- }
- }
-
- if (same) {
- struct nexthop *tmp_nh;
-
- if (fromkernel && CHECK_FLAG(flags, ZEBRA_FLAG_SELFROUTE) &&
- !zrouter.allow_delete) {
- rib_install_kernel(rn, same, NULL);
- route_unlock_node(rn);
-
- return;
- }
-
- /* Special handling for IPv4 or IPv6 routes sourced from
- * EVPN - the nexthop (and associated MAC) need to be
- * uninstalled if no more refs.
- */
- for (ALL_NEXTHOPS(re->nhe->nhg, tmp_nh)) {
- struct ipaddr vtep_ip;
-
- if (CHECK_FLAG(tmp_nh->flags, NEXTHOP_FLAG_EVPN)) {
- memset(&vtep_ip, 0, sizeof(struct ipaddr));
- if (afi == AFI_IP) {
- vtep_ip.ipa_type = IPADDR_V4;
- memcpy(&(vtep_ip.ipaddr_v4),
- &(tmp_nh->gate.ipv4),
- sizeof(struct in_addr));
- } else {
- vtep_ip.ipa_type = IPADDR_V6;
- memcpy(&(vtep_ip.ipaddr_v6),
- &(tmp_nh->gate.ipv6),
- sizeof(struct in6_addr));
- }
- zebra_rib_queue_evpn_route_del(re->vrf_id,
- &vtep_ip, p);
- }
- }
+ struct zebra_early_route *ere;
+ struct route_entry *re = NULL;
+ struct nhg_hash_entry *nhe = NULL;
- /* Notify dplane if system route changes */
- if (RIB_SYSTEM_ROUTE(re))
- dplane_sys_route_del(rn, same);
+ re = zebra_rib_route_entry_new(vrf_id, type, instance, flags, nhe_id,
+ table_id, metric, 0, distance, 0);
- rib_delnode(rn, same);
+ if (nh) {
+ nhe = zebra_nhg_alloc();
+ nhe->nhg.nexthop = nexthop_dup(nh, NULL);
}
- route_unlock_node(rn);
- return;
+ ere = XCALLOC(MTYPE_WQ_WRAPPER, sizeof(*ere));
+ ere->afi = afi;
+ ere->safi = safi;
+ ere->p = *p;
+ if (src_p)
+ ere->src_p = *src_p;
+ ere->src_p_provided = !!src_p;
+ ere->re = re;
+ ere->re_nhe = nhe;
+ ere->startup = false;
+ ere->deletion = true;
+ ere->fromkernel = fromkernel;
+
+ mq_add_handler(ere, rib_meta_queue_early_route_add);
}
@@ -3859,36 +4175,23 @@ int rib_add(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
uint8_t distance, route_tag_t tag, bool startup)
{
struct route_entry *re = NULL;
- struct nexthop *nexthop = NULL;
- struct nexthop_group *ng = NULL;
+ struct nexthop nexthop = {};
+ struct nexthop_group ng = {};
/* Allocate new route_entry structure. */
- re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
- re->type = type;
- re->instance = instance;
- re->distance = distance;
- re->flags = flags;
- re->metric = metric;
- re->mtu = mtu;
- re->table = table_id;
- re->vrf_id = vrf_id;
- re->uptime = monotime(NULL);
- re->tag = tag;
- re->nhe_id = nhe_id;
+ re = zebra_rib_route_entry_new(vrf_id, type, instance, flags, nhe_id,
+ table_id, metric, mtu, distance, tag);
/* If the owner of the route supplies a shared nexthop-group id,
* we'll use that. Otherwise, pass the nexthop along directly.
*/
if (!nhe_id) {
- ng = nexthop_group_new();
-
/* Add nexthop. */
- nexthop = nexthop_new();
- *nexthop = *nh;
- nexthop_group_add_sorted(ng, nexthop);
+ nexthop = *nh;
+ nexthop_group_add_sorted(&ng, &nexthop);
}
- return rib_add_multipath(afi, safi, p, src_p, re, ng, startup);
+ return rib_add_multipath(afi, safi, p, src_p, re, &ng, startup);
}
static const char *rib_update_event2str(enum rib_update_event event)
diff --git a/zebra/zebra_script.c b/zebra/zebra_script.c
index d247f87708..2e2f4159cd 100644
--- a/zebra/zebra_script.c
+++ b/zebra/zebra_script.c
@@ -329,14 +329,6 @@ void lua_pushzebra_dplane_ctx(lua_State *L, const struct zebra_dplane_ctx *ctx)
lua_setfield(L, -2, "ipset");
break;
}
- case DPLANE_OP_ADDR_INSTALL:
- case DPLANE_OP_ADDR_UNINSTALL:
- case DPLANE_OP_INTF_ADDR_ADD:
- case DPLANE_OP_INTF_ADDR_DEL:
- case DPLANE_OP_INTF_INSTALL:
- case DPLANE_OP_INTF_UPDATE:
- case DPLANE_OP_INTF_DELETE:
- break;
case DPLANE_OP_NEIGH_INSTALL:
case DPLANE_OP_NEIGH_UPDATE:
case DPLANE_OP_NEIGH_DELETE:
@@ -418,6 +410,17 @@ void lua_pushzebra_dplane_ctx(lua_State *L, const struct zebra_dplane_ctx *ctx)
}
lua_setfield(L, -2, "gre");
+ case DPLANE_OP_ADDR_INSTALL:
+ case DPLANE_OP_ADDR_UNINSTALL:
+ case DPLANE_OP_INTF_ADDR_ADD:
+ case DPLANE_OP_INTF_ADDR_DEL:
+ case DPLANE_OP_INTF_INSTALL:
+ case DPLANE_OP_INTF_UPDATE:
+ case DPLANE_OP_INTF_DELETE:
+ case DPLANE_OP_TC_INSTALL:
+ case DPLANE_OP_TC_UPDATE:
+ case DPLANE_OP_TC_DELETE:
+ /* Not currently handled */
case DPLANE_OP_INTF_NETCONFIG: /*NYI*/
case DPLANE_OP_NONE:
break;
diff --git a/zebra/zebra_srv6.c b/zebra/zebra_srv6.c
index 219d047694..36506cacc7 100644
--- a/zebra/zebra_srv6.c
+++ b/zebra/zebra_srv6.c
@@ -162,6 +162,7 @@ void zebra_srv6_locator_delete(struct srv6_locator *locator)
}
listnode_delete(srv6->locators, locator);
+ srv6_locator_free(locator);
}
struct srv6_locator *zebra_srv6_locator_lookup(const char *name)
diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c
index 5a6321ae7e..34cce71cd7 100644
--- a/zebra/zebra_vxlan.c
+++ b/zebra/zebra_vxlan.c
@@ -2090,6 +2090,7 @@ static int zebra_vxlan_handle_vni_transition(struct zebra_vrf *zvrf, vni_t vni,
int add)
{
struct zebra_evpn *zevpn = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
/* There is a possibility that VNI notification was already received
* from kernel and we programmed it as L2-VNI
@@ -2117,6 +2118,10 @@ static int zebra_vxlan_handle_vni_transition(struct zebra_vrf *zvrf, vni_t vni,
/* Free up all remote VTEPs, if any. */
zebra_evpn_vtep_del_all(zevpn, 1);
+ zl3vni = zl3vni_from_vrf(zevpn->vrf_id);
+ if (zl3vni)
+ listnode_delete(zl3vni->l2vnis, zevpn);
+
/* Delete the hash entry. */
if (zebra_evpn_vxlan_del(zevpn)) {
flog_err(EC_ZEBRA_VNI_DEL_FAILED,
@@ -2172,8 +2177,12 @@ static int zebra_vxlan_handle_vni_transition(struct zebra_vrf *zvrf, vni_t vni,
/* Find bridge interface for the VNI */
vlan_if = zvni_map_to_svi(vxl->access_vlan,
zif->brslave_info.br_if);
- if (vlan_if)
+ if (vlan_if) {
zevpn->vrf_id = vlan_if->vrf->vrf_id;
+ zl3vni = zl3vni_from_vrf(vlan_if->vrf->vrf_id);
+ if (zl3vni)
+ listnode_add_sort_nodup(zl3vni->l2vnis, zevpn);
+ }
zevpn->vxlan_if = ifp;
zevpn->local_vtep_ip = vxl->vtep_ip;
@@ -5139,10 +5148,9 @@ int zebra_vxlan_if_update(struct interface *ifp, uint16_t chgflags)
return 0;
/* Inform BGP, if there is a change of interest. */
- if (chgflags
- & (ZEBRA_VXLIF_MASTER_CHANGE |
- ZEBRA_VXLIF_LOCAL_IP_CHANGE |
- ZEBRA_VXLIF_MCAST_GRP_CHANGE))
+ if (chgflags &
+ (ZEBRA_VXLIF_MASTER_CHANGE | ZEBRA_VXLIF_LOCAL_IP_CHANGE |
+ ZEBRA_VXLIF_MCAST_GRP_CHANGE | ZEBRA_VXLIF_VLAN_CHANGE))
zebra_evpn_send_add_to_client(zevpn);
/* If there is a valid new master or a VLAN mapping change,
diff --git a/zebra/zserv.c b/zebra/zserv.c
index f76b29deff..ebe246ffbc 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -228,8 +228,7 @@ static void zserv_write(struct thread *thread)
case BUFFER_ERROR:
goto zwrite_fail;
case BUFFER_PENDING:
- atomic_store_explicit(&client->last_write_time,
- (uint32_t)monotime(NULL),
+ atomic_store_explicit(&client->last_write_time, monotime(NULL),
memory_order_relaxed);
zserv_client_event(client, ZSERV_CLIENT_WRITE);
return;
@@ -264,8 +263,7 @@ static void zserv_write(struct thread *thread)
case BUFFER_ERROR:
goto zwrite_fail;
case BUFFER_PENDING:
- atomic_store_explicit(&client->last_write_time,
- (uint32_t)monotime(NULL),
+ atomic_store_explicit(&client->last_write_time, monotime(NULL),
memory_order_relaxed);
zserv_client_event(client, ZSERV_CLIENT_WRITE);
return;
@@ -276,8 +274,8 @@ static void zserv_write(struct thread *thread)
atomic_store_explicit(&client->last_write_cmd, wcmd,
memory_order_relaxed);
- atomic_store_explicit(&client->last_write_time,
- (uint32_t)monotime(NULL), memory_order_relaxed);
+ atomic_store_explicit(&client->last_write_time, monotime(NULL),
+ memory_order_relaxed);
return;
@@ -748,7 +746,7 @@ static struct zserv *zserv_client_create(int sock)
client->wb = buffer_new(0);
TAILQ_INIT(&(client->gr_info_queue));
- atomic_store_explicit(&client->connect_time, (uint32_t) monotime(NULL),
+ atomic_store_explicit(&client->connect_time, monotime(NULL),
memory_order_relaxed);
/* Initialize flags */
diff --git a/zebra/zserv.h b/zebra/zserv.h
index 9986cc9f7e..db7b70d7c4 100644
--- a/zebra/zserv.h
+++ b/zebra/zserv.h
@@ -216,15 +216,15 @@ struct zserv {
*/
/* monotime of client creation */
- _Atomic uint32_t connect_time;
+ _Atomic uint64_t connect_time;
/* monotime of last message received */
- _Atomic uint32_t last_read_time;
+ _Atomic uint64_t last_read_time;
/* monotime of last message sent */
- _Atomic uint32_t last_write_time;
+ _Atomic uint64_t last_write_time;
/* command code of last message read */
- _Atomic uint32_t last_read_cmd;
+ _Atomic uint64_t last_read_cmd;
/* command code of last message written */
- _Atomic uint32_t last_write_cmd;
+ _Atomic uint64_t last_write_cmd;
/*
* Number of instances configured with