58 files changed, 2791 insertions, 1495 deletions
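The common thread in this series is FRR's hook mechanism (lib/hook.h): bgpd gains three new hooks — bgp_hook_config_write_debug, bgp_hook_vrf_update and bgp_hook_config_write_vrf — and the RPKI module registers handlers on them so it can emit its per-VRF configuration, react to VRF enable/disable events and report its debug state without bgpd having to know about the plugin. As a reading aid for the hunks below, here is a minimal sketch of that pattern; the names example_hook, example_handler and example_module_init are hypothetical, only the macros and the calling convention mirror what the patch itself uses.

	/* shared header (cf. bgpd/bgp_debug.h): announce the hook to consumers */
	DECLARE_HOOK(example_hook, (struct vty *vty, bool running), (vty, running))

	/* owning .c file (cf. bgpd/bgp_debug.c): instantiate the hook */
	DEFINE_HOOK(example_hook, (struct vty *vty, bool running), (vty, running))

	/* a module (cf. bgpd/bgp_rpki.c): a handler with a matching signature,
	 * attached once at module-init time */
	static int example_handler(struct vty *vty, bool running)
	{
		vty_out(vty, "example output (running-config: %d)\n", running);
		return 1;	/* hook_call() sums the handlers' return values */
	}

	static int example_module_init(void)
	{
		hook_register(example_hook, &example_handler);
		return 0;
	}

	/* call site in the owner: run every registered handler, use the sum */
	if (hook_call(example_hook, vty, true))
		write++;

This is the same arrangement that lets bgp_config_write_debug() count the module's "debug rpki" line toward its write total, and lets bgp_vrf_enable()/bgp_vrf_disable() start or stop the per-VRF RTR manager via bgp_hook_vrf_update.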
diff --git a/bfdd/bfdd_nb_config.c b/bfdd/bfdd_nb_config.c index de11997d1a..915a2f6419 100644 --- a/bfdd/bfdd_nb_config.c +++ b/bfdd/bfdd_nb_config.c @@ -257,6 +257,7 @@ int bfdd_bfd_profile_detection_multiplier_modify(struct nb_cb_modify_args *args) bp = nb_running_get_entry(args->dnode, NULL, true); bp->detection_multiplier = yang_dnode_get_uint8(args->dnode, NULL); + bfd_profile_update(bp); return NB_OK; } diff --git a/bgpd/bgp_debug.c b/bgpd/bgp_debug.c index 2ca9e5ee13..f3d387a0e1 100644 --- a/bgpd/bgp_debug.c +++ b/bgpd/bgp_debug.c @@ -30,6 +30,7 @@ #include "memory.h" #include "queue.h" #include "filter.h" +#include "hook.h" #include "bgpd/bgpd.h" #include "bgpd/bgp_aspath.h" @@ -47,6 +48,9 @@ #include "bgpd/bgp_vty.h" #include "bgpd/bgp_flowspec.h" +DEFINE_HOOK(bgp_hook_config_write_debug, (struct vty *vty, bool running), + (vty, running)) + unsigned long conf_bgp_debug_as4; unsigned long conf_bgp_debug_neighbor_events; unsigned long conf_bgp_debug_events; @@ -2168,7 +2172,7 @@ DEFUN_NOSH (show_debugging_bgp, vty_out(vty, " BGP policy based routing debugging is on\n"); if (BGP_DEBUG(pbr, PBR_ERROR)) vty_out(vty, " BGP policy based routing error debugging is on\n"); - + hook_call(bgp_hook_config_write_debug, vty, false); vty_out(vty, "\n"); return CMD_SUCCESS; } @@ -2284,6 +2288,9 @@ static int bgp_config_write_debug(struct vty *vty) vty_out(vty, "debug bgp graceful-restart\n"); write++; } + + if (hook_call(bgp_hook_config_write_debug, vty, true)) + write++; return write; } diff --git a/bgpd/bgp_debug.h b/bgpd/bgp_debug.h index 69f25566a9..e021f19c45 100644 --- a/bgpd/bgp_debug.h +++ b/bgpd/bgp_debug.h @@ -21,9 +21,15 @@ #ifndef _QUAGGA_BGP_DEBUG_H #define _QUAGGA_BGP_DEBUG_H +#include "hook.h" +#include "vty.h" + #include "bgp_attr.h" #include "bgp_updgrp.h" +DECLARE_HOOK(bgp_hook_config_write_debug, (struct vty *vty, bool running), + (vty, running)) + /* sort of packet direction */ #define DUMP_ON 1 #define DUMP_SEND 2 diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c index d2257b0126..1a0e5c0cd3 100644 --- a/bgpd/bgp_evpn_vty.c +++ b/bgpd/bgp_evpn_vty.c @@ -4075,7 +4075,7 @@ DEFUN(show_bgp_l2vpn_evpn_es, */ DEFUN(show_bgp_l2vpn_evpn_summary, show_bgp_l2vpn_evpn_summary_cmd, - "show bgp [vrf VRFNAME] l2vpn evpn summary [failed] [json]", + "show bgp [vrf VRFNAME] l2vpn evpn summary [established|failed] [json]", SHOW_STR BGP_STR "bgp vrf\n" @@ -4083,6 +4083,7 @@ DEFUN(show_bgp_l2vpn_evpn_summary, L2VPN_HELP_STR EVPN_HELP_STR "Summary of BGP neighbor status\n" + "Show only sessions in Established state\n" "Show only sessions not in Established state\n" JSON_STR) { @@ -4090,13 +4091,17 @@ DEFUN(show_bgp_l2vpn_evpn_summary, bool uj = use_json(argc, argv); char *vrf = NULL; bool show_failed = false; + bool show_established = false; if (argv_find(argv, argc, "vrf", &idx_vrf)) vrf = argv[++idx_vrf]->arg; if (argv_find(argv, argc, "failed", &idx_vrf)) show_failed = true; - return bgp_show_summary_vty(vty, vrf, AFI_L2VPN, SAFI_EVPN, - show_failed, uj); + if (argv_find(argv, argc, "established", &idx_vrf)) + show_established = true; + + return bgp_show_summary_vty(vty, vrf, AFI_L2VPN, SAFI_EVPN, show_failed, + show_established, uj); } /* diff --git a/bgpd/bgp_main.c b/bgpd/bgp_main.c index 33eaf9ae74..ebf2328a71 100644 --- a/bgpd/bgp_main.c +++ b/bgpd/bgp_main.c @@ -61,10 +61,16 @@ #include "bgpd/bgp_network.h" #include "bgpd/bgp_errors.h" +DEFINE_HOOK(bgp_hook_config_write_vrf, (struct vty *vty, struct vrf *vrf), + (vty, vrf)) + #ifdef ENABLE_BGP_VNC #include 
"bgpd/rfapi/rfapi_backend.h" #endif +DEFINE_HOOK(bgp_hook_vrf_update, (struct vrf *vrf, bool enabled), + (vrf, enabled)) + /* bgpd options, we use GNU getopt library. */ static const struct option longopts[] = { {"bgp_port", required_argument, NULL, 'p'}, @@ -302,6 +308,7 @@ static int bgp_vrf_enable(struct vrf *vrf) if (old_vrf_id != bgp->vrf_id) bgp_redistribute_redo(bgp); bgp_instance_up(bgp); + hook_call(bgp_hook_vrf_update, vrf, true); vpn_leak_zebra_vrf_label_update(bgp, AFI_IP); vpn_leak_zebra_vrf_label_update(bgp, AFI_IP6); vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP, @@ -351,16 +358,37 @@ static int bgp_vrf_disable(struct vrf *vrf) if (old_vrf_id != bgp->vrf_id) bgp_unset_redist_vrf_bitmaps(bgp, old_vrf_id); bgp_instance_down(bgp); + hook_call(bgp_hook_vrf_update, vrf, false); } /* Note: This is a callback, the VRF will be deleted by the caller. */ return 0; } +static int bgp_vrf_config_write(struct vty *vty) +{ + struct vrf *vrf; + + RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { + if (vrf->vrf_id == VRF_DEFAULT) { + vty_out(vty, "!\n"); + continue; + } + vty_out(vty, "vrf %s\n", vrf->name); + + hook_call(bgp_hook_config_write_vrf, vty, vrf); + + vty_out(vty, " exit-vrf\n!\n"); + } + + return 0; +} + static void bgp_vrf_init(void) { vrf_init(bgp_vrf_new, bgp_vrf_enable, bgp_vrf_disable, bgp_vrf_delete, bgp_vrf_enable); + vrf_cmd_init(bgp_vrf_config_write, &bgpd_privs); } static void bgp_vrf_terminate(void) diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index ad437c6ba4..19e398fc88 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -7567,8 +7567,7 @@ static char *bgp_nexthop_hostname(struct peer *peer, struct bgp_nexthop_cache *bnc) { if (peer->hostname - && CHECK_FLAG(peer->bgp->flags, BGP_FLAG_SHOW_HOSTNAME) && bnc - && CHECK_FLAG(bnc->flags, BGP_NEXTHOP_CONNECTED)) + && CHECK_FLAG(peer->bgp->flags, BGP_FLAG_SHOW_NEXTHOP_HOSTNAME)) return peer->hostname; return NULL; } @@ -7578,6 +7577,7 @@ void route_vty_out(struct vty *vty, const struct prefix *p, struct bgp_path_info *path, int display, safi_t safi, json_object *json_paths) { + int len; struct attr *attr = path->attr; json_object *json_path = NULL; json_object *json_nexthops = NULL; @@ -7679,20 +7679,29 @@ void route_vty_out(struct vty *vty, const struct prefix *p, json_object_string_add(json_nexthop_global, "ip", nexthop); - if (nexthop_hostname) + if (path->peer->hostname) json_object_string_add(json_nexthop_global, "hostname", - nexthop_hostname); + path->peer->hostname); json_object_string_add(json_nexthop_global, "afi", (af == AF_INET) ? "ipv4" : "ipv6"); json_object_boolean_true_add(json_nexthop_global, "used"); - } else - vty_out(vty, "%s%s", - nexthop_hostname ? 
nexthop_hostname : nexthop, - vrf_id_str); + } else { + if (nexthop_hostname) + len = vty_out(vty, "%s(%s)%s", nexthop, + nexthop_hostname, vrf_id_str); + else + len = vty_out(vty, "%s%s", nexthop, vrf_id_str); + + len = 16 - len; + if (len < 1) + vty_out(vty, "\n%*s", 36, " "); + else + vty_out(vty, "%*s", len, " "); + } } else if (safi == SAFI_EVPN) { if (json_paths) { json_nexthop_global = json_object_new_object(); @@ -7700,20 +7709,29 @@ void route_vty_out(struct vty *vty, const struct prefix *p, json_object_string_add(json_nexthop_global, "ip", inet_ntoa(attr->nexthop)); - if (nexthop_hostname) + if (path->peer->hostname) json_object_string_add(json_nexthop_global, "hostname", - nexthop_hostname); + path->peer->hostname); json_object_string_add(json_nexthop_global, "afi", "ipv4"); json_object_boolean_true_add(json_nexthop_global, "used"); - } else - vty_out(vty, "%-16s%s", - nexthop_hostname ? nexthop_hostname - : inet_ntoa(attr->nexthop), - vrf_id_str); + } else { + if (nexthop_hostname) + len = vty_out(vty, "%pI4(%s)%s", &attr->nexthop, + nexthop_hostname, vrf_id_str); + else + len = vty_out(vty, "%pI4%s", &attr->nexthop, + vrf_id_str); + + len = 16 - len; + if (len < 1) + vty_out(vty, "\n%*s", 36, " "); + else + vty_out(vty, "%*s", len, " "); + } } else if (safi == SAFI_FLOWSPEC) { if (attr->nexthop.s_addr != INADDR_ANY) { if (json_paths) { @@ -7725,19 +7743,30 @@ void route_vty_out(struct vty *vty, const struct prefix *p, json_nexthop_global, "ip", inet_ntoa(attr->nexthop)); - if (nexthop_hostname) + if (path->peer->hostname) json_object_string_add( json_nexthop_global, "hostname", - nexthop_hostname); + path->peer->hostname); json_object_boolean_true_add( json_nexthop_global, "used"); } else { - vty_out(vty, "%-16s", - nexthop_hostname - ? nexthop_hostname - : inet_ntoa(attr->nexthop)); + if (nexthop_hostname) + len = vty_out(vty, "%pI4(%s)%s", + &attr->nexthop, + nexthop_hostname, + vrf_id_str); + else + len = vty_out(vty, "%pI4%s", + &attr->nexthop, + vrf_id_str); + + len = 16 - len; + if (len < 1) + vty_out(vty, "\n%*s", 36, " "); + else + vty_out(vty, "%*s", len, " "); } } } else if (p->family == AF_INET && !BGP_ATTR_NEXTHOP_AFI_IP6(attr)) { @@ -7747,29 +7776,33 @@ void route_vty_out(struct vty *vty, const struct prefix *p, json_object_string_add(json_nexthop_global, "ip", inet_ntoa(attr->nexthop)); - if (nexthop_hostname) + if (path->peer->hostname) json_object_string_add(json_nexthop_global, "hostname", - nexthop_hostname); + path->peer->hostname); json_object_string_add(json_nexthop_global, "afi", "ipv4"); json_object_boolean_true_add(json_nexthop_global, "used"); } else { - char buf[BUFSIZ]; + if (nexthop_hostname) + len = vty_out(vty, "%pI4(%s)%s", &attr->nexthop, + nexthop_hostname, vrf_id_str); + else + len = vty_out(vty, "%pI4%s", &attr->nexthop, + vrf_id_str); - snprintf(buf, sizeof(buf), "%s%s", - nexthop_hostname ? 
nexthop_hostname - : inet_ntoa(attr->nexthop), - vrf_id_str); - vty_out(vty, "%-16s", buf); + len = 16 - len; + if (len < 1) + vty_out(vty, "\n%*s", 36, " "); + else + vty_out(vty, "%*s", len, " "); } } /* IPv6 Next Hop */ else if (p->family == AF_INET6 || BGP_ATTR_NEXTHOP_AFI_IP6(attr)) { - int len; char buf[BUFSIZ]; if (json_paths) { @@ -7779,10 +7812,10 @@ void route_vty_out(struct vty *vty, const struct prefix *p, inet_ntop(AF_INET6, &attr->mp_nexthop_global, buf, BUFSIZ)); - if (nexthop_hostname) + if (path->peer->hostname) json_object_string_add(json_nexthop_global, "hostname", - nexthop_hostname); + path->peer->hostname); json_object_string_add(json_nexthop_global, "afi", "ipv6"); @@ -7801,10 +7834,10 @@ void route_vty_out(struct vty *vty, const struct prefix *p, &attr->mp_nexthop_local, buf, BUFSIZ)); - if (nexthop_hostname) + if (path->peer->hostname) json_object_string_add( json_nexthop_ll, "hostname", - nexthop_hostname); + path->peer->hostname); json_object_string_add(json_nexthop_ll, "afi", "ipv6"); @@ -7843,15 +7876,18 @@ void route_vty_out(struct vty *vty, const struct prefix *p, else vty_out(vty, "%*s", len, " "); } else { - len = vty_out( - vty, "%s%s", - nexthop_hostname - ? nexthop_hostname - : inet_ntop( - AF_INET6, - &attr->mp_nexthop_local, - buf, BUFSIZ), - vrf_id_str); + if (nexthop_hostname) + len = vty_out( + vty, "%pI6(%s)%s", + &attr->mp_nexthop_local, + nexthop_hostname, + vrf_id_str); + else + len = vty_out( + vty, "%pI6%s", + &attr->mp_nexthop_local, + vrf_id_str); + len = 16 - len; if (len < 1) @@ -7860,15 +7896,16 @@ void route_vty_out(struct vty *vty, const struct prefix *p, vty_out(vty, "%*s", len, " "); } } else { - len = vty_out( - vty, "%s%s", - nexthop_hostname - ? nexthop_hostname - : inet_ntop( - AF_INET6, - &attr->mp_nexthop_global, - buf, BUFSIZ), - vrf_id_str); + if (nexthop_hostname) + len = vty_out(vty, "%pI6(%s)%s", + &attr->mp_nexthop_global, + nexthop_hostname, + vrf_id_str); + else + len = vty_out(vty, "%pI6%s", + &attr->mp_nexthop_global, + vrf_id_str); + len = 16 - len; if (len < 1) @@ -7994,6 +8031,7 @@ void route_vty_out_tmp(struct vty *vty, const struct prefix *p, { json_object *json_status = NULL; json_object *json_net = NULL; + int len; char buff[BUFSIZ]; /* Route status display. */ @@ -8087,7 +8125,6 @@ void route_vty_out_tmp(struct vty *vty, const struct prefix *p, inet_ntoa(attr->nexthop)); } else if (p->family == AF_INET6 || BGP_ATTR_NEXTHOP_AFI_IP6(attr)) { - int len; char buf[BUFSIZ]; len = vty_out( @@ -8829,31 +8866,38 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn, json_nexthop_global, "ip", inet_ntoa(attr->mp_nexthop_global_in)); - if (nexthop_hostname) + if (path->peer->hostname) json_object_string_add( json_nexthop_global, "hostname", + path->peer->hostname); + } else { + if (nexthop_hostname) + vty_out(vty, " %pI4(%s)", + &attr->mp_nexthop_global_in, nexthop_hostname); - } else - vty_out(vty, " %s", - nexthop_hostname - ? nexthop_hostname - : inet_ntoa( - attr->mp_nexthop_global_in)); + else + vty_out(vty, " %pI4", + &attr->mp_nexthop_global_in); + } } else { if (json_paths) { json_object_string_add( json_nexthop_global, "ip", inet_ntoa(attr->nexthop)); - if (nexthop_hostname) + if (path->peer->hostname) json_object_string_add( json_nexthop_global, "hostname", + path->peer->hostname); + } else { + if (nexthop_hostname) + vty_out(vty, " %pI4(%s)", + &attr->nexthop, nexthop_hostname); - } else - vty_out(vty, " %s", - nexthop_hostname - ? 
nexthop_hostname - : inet_ntoa(attr->nexthop)); + else + vty_out(vty, " %pI4", + &attr->nexthop); + } } if (json_paths) @@ -8866,22 +8910,23 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn, inet_ntop(AF_INET6, &attr->mp_nexthop_global, buf, INET6_ADDRSTRLEN)); - if (nexthop_hostname) + if (path->peer->hostname) json_object_string_add(json_nexthop_global, "hostname", - nexthop_hostname); + path->peer->hostname); json_object_string_add(json_nexthop_global, "afi", "ipv6"); json_object_string_add(json_nexthop_global, "scope", "global"); } else { - vty_out(vty, " %s", - nexthop_hostname - ? nexthop_hostname - : inet_ntop(AF_INET6, - &attr->mp_nexthop_global, - buf, INET6_ADDRSTRLEN)); + if (nexthop_hostname) + vty_out(vty, " %pI6(%s)", + &attr->mp_nexthop_global, + nexthop_hostname); + else + vty_out(vty, " %pI6", + &attr->mp_nexthop_global); } } @@ -9059,10 +9104,10 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn, inet_ntop(AF_INET6, &attr->mp_nexthop_local, buf, INET6_ADDRSTRLEN)); - if (nexthop_hostname) + if (path->peer->hostname) json_object_string_add(json_nexthop_ll, "hostname", - nexthop_hostname); + path->peer->hostname); json_object_string_add(json_nexthop_ll, "afi", "ipv6"); json_object_string_add(json_nexthop_ll, "scope", diff --git a/bgpd/bgp_rpki.c b/bgpd/bgp_rpki.c index d904b9f6f1..4e0642dce2 100644 --- a/bgpd/bgp_rpki.c +++ b/bgpd/bgp_rpki.c @@ -7,6 +7,7 @@ * Hamburg * Copyright (C) 2017-2018 Marcel Röthke (marcel.roethke@haw-hamburg.de), * for HAW Hamburg + * Copyright (C) 2019 6WIND * * This file is part of FRRouting. * @@ -47,6 +48,7 @@ #include "bgpd/bgp_attr.h" #include "bgpd/bgp_aspath.h" #include "bgpd/bgp_route.h" +#include "bgpd/bgp_debug.h" #include "lib/network.h" #include "lib/thread.h" #ifndef VTYSH_EXTRACT_PL @@ -60,6 +62,7 @@ #include "bgpd/bgp_rpki_clippy.c" #endif +DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_TEMP, "BGP RPKI Intermediate Buffer") DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_CACHE, "BGP RPKI Cache server") DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_CACHE_GROUP, "BGP RPKI Cache server group") @@ -67,12 +70,14 @@ DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_CACHE_GROUP, "BGP RPKI Cache server group") #define RPKI_NOTFOUND 2 #define RPKI_INVALID 3 +#define STR_SEPARATOR 10 + #define POLLING_PERIOD_DEFAULT 3600 #define EXPIRE_INTERVAL_DEFAULT 7200 #define RETRY_INTERVAL_DEFAULT 600 #define RPKI_DEBUG(...) 
\ - if (rpki_debug) { \ + if (rpki_debug_conf || rpki_debug_term) { \ zlog_debug("RPKI: " __VA_ARGS__); \ } @@ -87,40 +92,66 @@ struct cache { } tr_config; struct rtr_socket *rtr_socket; uint8_t preference; + struct rpki_vrf *rpki_vrf; }; enum return_values { SUCCESS = 0, ERROR = -1 }; +extern struct zebra_privs_t bgpd_privs; + struct rpki_for_each_record_arg { struct vty *vty; unsigned int *prefix_amount; as_t as; }; -static int start(void); -static void stop(void); -static int reset(bool force); -static struct rtr_mgr_group *get_connected_group(void); -static void print_prefix_table(struct vty *vty); +struct rpki_vrf { + struct rtr_mgr_config *rtr_config; + struct list *cache_list; + bool rtr_is_running; + bool rtr_is_stopping; + _Atomic int rtr_update_overflow; + unsigned int polling_period; + unsigned int expire_interval; + unsigned int retry_interval; + int rpki_sync_socket_rtr; + int rpki_sync_socket_bgpd; + char *vrfname; + QOBJ_FIELDS +}; + +static struct rpki_vrf *find_rpki_vrf(const char *vrfname); +static int bgp_rpki_vrf_update(struct vrf *vrf, bool enabled); +static int bgp_rpki_write_vrf(struct vty *vty, struct vrf *vrf); +static int bgp_rpki_hook_write_vrf(struct vty *vty, struct vrf *vrf); +static int bgp_rpki_write_debug(struct vty *vty, bool running); +static int start(struct rpki_vrf *rpki_vrf); +static void stop(struct rpki_vrf *rpki_vrf); +static int reset(bool force, struct rpki_vrf *rpki_vrf); +static struct rtr_mgr_group *get_connected_group(struct rpki_vrf *rpki_vrf); +static void print_prefix_table(struct vty *vty, struct rpki_vrf *rpki_vrf); static void install_cli_commands(void); static int config_write(struct vty *vty); static int config_on_exit(struct vty *vty); static void free_cache(struct cache *cache); -static struct rtr_mgr_group *get_groups(void); +static struct rtr_mgr_group *get_groups(struct list *cache_list); #if defined(FOUND_SSH) -static int add_ssh_cache(const char *host, const unsigned int port, +static int add_ssh_cache(struct rpki_vrf *rpki_vrf, + const char *host, + const unsigned int port, const char *username, const char *client_privkey_path, const char *client_pubkey_path, const char *server_pubkey_path, const uint8_t preference); #endif static struct rtr_socket *create_rtr_socket(struct tr_socket *tr_socket); -static struct cache *find_cache(const uint8_t preference); -static int add_tcp_cache(const char *host, const char *port, - const uint8_t preference); +static struct cache *find_cache(const uint8_t preference, + struct list *cache_list); +static int add_tcp_cache(struct rpki_vrf *rpki_vrf, const char *host, + const char *port, const uint8_t preference); static void print_record(const struct pfx_record *record, struct vty *vty); -static int is_synchronized(void); -static int is_running(void); +static int is_synchronized(struct rpki_vrf *rpki); +static int is_running(struct rpki_vrf *rpki); static void route_match_free(void *rule); static enum route_map_cmd_result_t route_match(void *rule, const struct prefix *prefix, @@ -128,19 +159,14 @@ static enum route_map_cmd_result_t route_match(void *rule, void *object); static void *route_match_compile(const char *arg); static void revalidate_bgp_node(struct bgp_dest *dest, afi_t afi, safi_t safi); -static void revalidate_all_routes(void); - -static struct rtr_mgr_config *rtr_config; -static struct list *cache_list; -static int rtr_is_running; -static int rtr_is_stopping; -static _Atomic int rtr_update_overflow; -static int rpki_debug; -static unsigned int polling_period; -static unsigned int 
expire_interval; -static unsigned int retry_interval; -static int rpki_sync_socket_rtr; -static int rpki_sync_socket_bgpd; +static void revalidate_all_routes(struct rpki_vrf *rpki_vrf); + +static int rpki_debug_conf, rpki_debug_term; + +DECLARE_QOBJ_TYPE(rpki_vrf) +DEFINE_QOBJ_TYPE(rpki_vrf) + +struct list *rpki_vrf_list; static struct cmd_node rpki_node = { .name = "rpki", @@ -150,6 +176,16 @@ static struct cmd_node rpki_node = { .config_write = config_write, .node_exit = config_on_exit, }; + +static struct cmd_node rpki_vrf_node = { + .name = "rpki", + .node = RPKI_VRF_NODE, + .parent_node = VRF_NODE, + .prompt = "%s(config-vrf-rpki)# ", + .config_write = config_write, + .node_exit = config_on_exit, +}; + static const struct route_map_rule_cmd route_match_rpki_cmd = { "rpki", route_match, route_match_compile, route_match_free}; @@ -259,11 +295,127 @@ static struct rtr_socket *create_rtr_socket(struct tr_socket *tr_socket) return rtr_socket; } -static struct cache *find_cache(const uint8_t preference) +static int bgp_rpki_vrf_update(struct vrf *vrf, bool enabled) +{ + struct rpki_vrf *rpki; + + if (vrf->vrf_id == VRF_DEFAULT) + rpki = find_rpki_vrf(NULL); + else + rpki = find_rpki_vrf(vrf->name); + if (!rpki) + return 0; + + if (enabled) + start(rpki); + else + stop(rpki); + return 1; +} + +/* tcp identifier : <HOST>:<PORT> + * ssh identifier : <user>@<HOST>:<PORT> + */ +static struct rpki_vrf *find_rpki_vrf_from_ident(const char *ident) +{ + char *ptr; + unsigned int port; + char *endptr; + struct listnode *rpki_vrf_nnode; + struct rpki_vrf *rpki_vrf; + struct listnode *cache_node; + struct cache *cache; + char *buf, *host; + bool is_tcp = true; + size_t host_len; + + /* extract the <SOCKET> */ + ptr = strrchr(ident, ':'); + if (!ptr) + return NULL; + ptr++; + /* extract port */ + port = atoi(ptr); + if (port == 0) + /* not ours */ + return NULL; + /* extract host */ + ptr--; + host_len = (size_t)(ptr - ident); + buf = XCALLOC(MTYPE_BGP_RPKI_TEMP, host_len + 1); + memcpy(buf, ident, host_len); + buf[host_len] = '\0'; + endptr = strrchr(buf, '@'); + /* ssh session */ + if (endptr) { + host = XCALLOC(MTYPE_BGP_RPKI_TEMP, (size_t)(buf + host_len - endptr) + 1); + memcpy(host, endptr + 1, (size_t)(buf + host_len - endptr) + 1); + is_tcp = false; + } else { + host = buf; + buf = NULL; + } + + for (ALL_LIST_ELEMENTS_RO(rpki_vrf_list, rpki_vrf_nnode, rpki_vrf)) { + for (ALL_LIST_ELEMENTS_RO(rpki_vrf->cache_list, + cache_node, cache)) { + if ((cache->type == TCP && !is_tcp) || + (cache->type == SSH && is_tcp)) + continue; + if (is_tcp) { + struct tr_tcp_config *tcp_config = cache->tr_config.tcp_config; + unsigned int cache_port; + + cache_port = atoi(tcp_config->port); + if (cache_port != port) + continue; + if (strlen(tcp_config->host) != strlen(host)) + continue; + if (0 == memcmp(tcp_config->host, host, host_len)) + break; + } else { + struct tr_ssh_config *ssh_config = cache->tr_config.ssh_config; + + if (port != ssh_config->port) + continue; + if (strmatch(ssh_config->host, host)) + break; + } + } + if (cache) + break; + } + if (host) + XFREE(MTYPE_BGP_RPKI_TEMP, host); + if (buf) + XFREE(MTYPE_BGP_RPKI_TEMP, buf); + return rpki_vrf; +} + +static struct rpki_vrf *find_rpki_vrf(const char *vrfname) +{ + struct listnode *rpki_vrf_nnode; + struct rpki_vrf *rpki_vrf; + + for (ALL_LIST_ELEMENTS_RO(rpki_vrf_list, rpki_vrf_nnode, rpki_vrf)) { + if ((!vrfname && rpki_vrf->vrfname) || + (vrfname && !rpki_vrf->vrfname) || + (vrfname && rpki_vrf->vrfname && + !strmatch(vrfname, rpki_vrf->vrfname))) + 
continue; + return rpki_vrf; + } + return NULL; +} + +static struct cache *find_cache(const uint8_t preference, + struct list *cache_list) { struct listnode *cache_node; struct cache *cache; + if (!cache_list) + return NULL; for (ALL_LIST_ELEMENTS_RO(cache_list, cache_node, cache)) { if (cache->preference == preference) return cache; @@ -301,14 +453,14 @@ static void print_record_cb(const struct pfx_record *record, void *data) print_record(record, vty); } -static struct rtr_mgr_group *get_groups(void) +static struct rtr_mgr_group *get_groups(struct list *cache_list) { struct listnode *cache_node; struct rtr_mgr_group *rtr_mgr_groups; struct cache *cache; + int group_count; - int group_count = listcount(cache_list); - + group_count = listcount(cache_list); if (group_count == 0) return NULL; @@ -330,14 +482,15 @@ static struct rtr_mgr_group *get_groups(void) return rtr_mgr_groups; } -inline int is_synchronized(void) +inline int is_synchronized(struct rpki_vrf *rpki_vrf) { - return rtr_is_running && rtr_mgr_conf_in_sync(rtr_config); + return rpki_vrf->rtr_is_running && + rtr_mgr_conf_in_sync(rpki_vrf->rtr_config); } -inline int is_running(void) +inline int is_running(struct rpki_vrf *rpki_vrf) { - return rtr_is_running; + return rpki_vrf->rtr_is_running; } static struct prefix *pfx_record_to_prefix(struct pfx_record *record) @@ -364,24 +517,28 @@ static int bgpd_sync_callback(struct thread *thread) struct listnode *node; struct prefix *prefix; struct pfx_record rec; + struct rpki_vrf *rpki_vrf = THREAD_ARG(thread); + struct vrf *vrf = NULL; - thread_add_read(bm->master, bgpd_sync_callback, NULL, - rpki_sync_socket_bgpd, NULL); + thread_add_read(bm->master, bgpd_sync_callback, rpki_vrf, + rpki_vrf->rpki_sync_socket_bgpd, NULL); - if (atomic_load_explicit(&rtr_update_overflow, memory_order_seq_cst)) { - while (read(rpki_sync_socket_bgpd, &rec, + if (atomic_load_explicit(&rpki_vrf->rtr_update_overflow, + memory_order_seq_cst)) { + while (read(rpki_vrf->rpki_sync_socket_bgpd, &rec, sizeof(struct pfx_record)) != -1) ; - atomic_store_explicit(&rtr_update_overflow, 0, + atomic_store_explicit(&rpki_vrf->rtr_update_overflow, 0, memory_order_seq_cst); - revalidate_all_routes(); + revalidate_all_routes(rpki_vrf); return 0; } int retval = - read(rpki_sync_socket_bgpd, &rec, sizeof(struct pfx_record)); + read(rpki_vrf->rpki_sync_socket_bgpd, &rec, + sizeof(struct pfx_record)); if (retval != sizeof(struct pfx_record)) { RPKI_DEBUG("Could not read from rpki_sync_socket_bgpd"); return retval; @@ -390,10 +547,24 @@ static int bgpd_sync_callback(struct thread *thread) afi_t afi = (rec.prefix.ver == LRTR_IPV4) ? 
AFI_IP : AFI_IP6; + if (rpki_vrf->vrfname) { + vrf = vrf_lookup_by_name(rpki_vrf->vrfname); + if (!vrf) { + zlog_err("%s(): vrf for rpki %s not found", + __func__, rpki_vrf->vrfname); + return 0; + } + } + for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp)) { struct peer *peer; struct listnode *peer_listnode; + if (!vrf && bgp->vrf_id != VRF_DEFAULT) + continue; + if (vrf && bgp->vrf_id != vrf->vrf_id) + continue; + for (ALL_LIST_ELEMENTS_RO(bgp->peer, peer_listnode, peer)) { safi_t safi; @@ -452,15 +623,30 @@ static void revalidate_bgp_node(struct bgp_dest *bgp_dest, afi_t afi, } } -static void revalidate_all_routes(void) +static void revalidate_all_routes(struct rpki_vrf *rpki_vrf) { struct bgp *bgp; struct listnode *node; + struct vrf *vrf = NULL; + + if (rpki_vrf->vrfname) { + vrf = vrf_lookup_by_name(rpki_vrf->vrfname); + if (!vrf) { + zlog_err("%s(): vrf for rpki %s not found", + __func__, rpki_vrf->vrfname); + return; + } + } for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp)) { struct peer *peer; struct listnode *peer_listnode; + if (!vrf && bgp->vrf_id != VRF_DEFAULT) + continue; + if (vrf && bgp->vrf_id != vrf->vrf_id) + continue; + for (ALL_LIST_ELEMENTS_RO(bgp->peer, peer_listnode, peer)) { for (size_t i = 0; i < 2; i++) { @@ -483,21 +669,51 @@ static void rpki_update_cb_sync_rtr(struct pfx_table *p __attribute__((unused)), const struct pfx_record rec, const bool added __attribute__((unused))) { - if (rtr_is_stopping - || atomic_load_explicit(&rtr_update_overflow, memory_order_seq_cst)) - return; + struct rpki_vrf *rpki_vrf; + const char *msg; + const struct rtr_socket *rtr = rec.socket; + struct tr_socket *tr; + const char *ident; + int retval; - int retval = - write(rpki_sync_socket_rtr, &rec, sizeof(struct pfx_record)); + if (!rtr) { + msg = "could not find rtr_socket from cb_sync_rtr"; + goto err; + } + tr = rtr->tr_socket; + if (!tr) { + msg = "could not find tr_socket from cb_sync_rtr"; + goto err; + } + ident = tr->ident_fp(tr->socket); + if (!ident) { + msg = "could not find rpki_vrf ident"; + goto err; + } + rpki_vrf = find_rpki_vrf_from_ident(ident); + if (!rpki_vrf) { + msg = "could not find rpki_vrf"; + goto err; + } + if (rpki_vrf->rtr_is_stopping + || atomic_load_explicit(&rpki_vrf->rtr_update_overflow, + memory_order_seq_cst)) + return; + retval = + write(rpki_vrf->rpki_sync_socket_rtr, &rec, + sizeof(struct pfx_record)); if (retval == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) - atomic_store_explicit(&rtr_update_overflow, 1, + atomic_store_explicit(&rpki_vrf->rtr_update_overflow, 1, memory_order_seq_cst); else if (retval != sizeof(struct pfx_record)) RPKI_DEBUG("Could not write to rpki_sync_socket_rtr"); + return; +err: + zlog_err("RPKI: %s", msg); } -static void rpki_init_sync_socket(void) +static void rpki_init_sync_socket(struct rpki_vrf *rpki_vrf) { int fds[2]; const char *msg; @@ -507,22 +723,22 @@ static void rpki_init_sync_socket(void) msg = "could not open rpki sync socketpair"; goto err; } - rpki_sync_socket_rtr = fds[0]; - rpki_sync_socket_bgpd = fds[1]; + rpki_vrf->rpki_sync_socket_rtr = fds[0]; + rpki_vrf->rpki_sync_socket_bgpd = fds[1]; - if (set_nonblocking(rpki_sync_socket_rtr) != 0) { + if (set_nonblocking(rpki_vrf->rpki_sync_socket_rtr) != 0) { msg = "could not set rpki_sync_socket_rtr to non blocking"; goto err; } - if (set_nonblocking(rpki_sync_socket_bgpd) != 0) { + if (set_nonblocking(rpki_vrf->rpki_sync_socket_bgpd) != 0) { msg = "could not set rpki_sync_socket_bgpd to non blocking"; goto err; } - thread_add_read(bm->master, bgpd_sync_callback, 
NULL, - rpki_sync_socket_bgpd, NULL); + thread_add_read(bm->master, bgpd_sync_callback, rpki_vrf, + rpki_vrf->rpki_sync_socket_bgpd, NULL); return; @@ -532,30 +748,64 @@ err: } -static int bgp_rpki_init(struct thread_master *master) +static struct rpki_vrf *bgp_rpki_allocate(const char *vrfname) { - rpki_debug = 0; - rtr_is_running = 0; - rtr_is_stopping = 0; + struct rpki_vrf *rpki_vrf; + + rpki_vrf = XCALLOC(MTYPE_BGP_RPKI_CACHE, + sizeof(struct rpki_vrf)); + + rpki_vrf->rtr_is_running = false; + rpki_vrf->rtr_is_stopping = false; + rpki_vrf->cache_list = list_new(); + rpki_vrf->cache_list->del = (void (*)(void *)) & free_cache; + rpki_vrf->polling_period = POLLING_PERIOD_DEFAULT; + rpki_vrf->expire_interval = EXPIRE_INTERVAL_DEFAULT; + rpki_vrf->retry_interval = RETRY_INTERVAL_DEFAULT; + + if (vrfname && !strmatch(vrfname, VRF_DEFAULT_NAME)) + rpki_vrf->vrfname = XSTRDUP(MTYPE_BGP_RPKI_CACHE, + vrfname); + QOBJ_REG(rpki_vrf, rpki_vrf); + listnode_add(rpki_vrf_list, rpki_vrf); + return rpki_vrf; +} - cache_list = list_new(); - cache_list->del = (void (*)(void *)) & free_cache; +static int bgp_rpki_init(struct thread_master *master) +{ + rpki_debug_conf = 0; + rpki_debug_term = 0; - polling_period = POLLING_PERIOD_DEFAULT; - expire_interval = EXPIRE_INTERVAL_DEFAULT; - retry_interval = RETRY_INTERVAL_DEFAULT; + rpki_vrf_list = list_new(); install_cli_commands(); - rpki_init_sync_socket(); + return 0; } +static void bgp_rpki_finish(struct rpki_vrf *rpki_vrf) +{ + stop(rpki_vrf); + list_delete(&rpki_vrf->cache_list); + + close(rpki_vrf->rpki_sync_socket_rtr); + close(rpki_vrf->rpki_sync_socket_bgpd); + + listnode_delete(rpki_vrf_list, rpki_vrf); + QOBJ_UNREG(rpki_vrf); + if (rpki_vrf->vrfname) + XFREE(MTYPE_BGP_RPKI_CACHE, rpki_vrf->vrfname); + XFREE(MTYPE_BGP_RPKI_CACHE, rpki_vrf); +} + static int bgp_rpki_fini(void) { - stop(); - list_delete(&cache_list); + struct rpki_vrf *rpki_vrf; - close(rpki_sync_socket_rtr); - close(rpki_sync_socket_bgpd); + /* assume default vrf */ + rpki_vrf = find_rpki_vrf(NULL); + if (!rpki_vrf) + return 0; + bgp_rpki_finish(rpki_vrf); return 0; } @@ -566,87 +816,113 @@ static int bgp_rpki_module_init(void) hook_register(frr_late_init, bgp_rpki_init); hook_register(frr_early_fini, &bgp_rpki_fini); + hook_register(bgp_hook_config_write_debug, &bgp_rpki_write_debug); + hook_register(bgp_hook_vrf_update, &bgp_rpki_vrf_update); + hook_register(bgp_hook_config_write_vrf, &bgp_rpki_hook_write_vrf); return 0; } -static int start(void) +static int start(struct rpki_vrf *rpki_vrf) { int ret; + struct list *cache_list = NULL; + struct vrf *vrf; - rtr_is_stopping = 0; - rtr_update_overflow = 0; + cache_list = rpki_vrf->cache_list; + rpki_vrf->rtr_is_stopping = false; + rpki_vrf->rtr_update_overflow = 0; - if (list_isempty(cache_list)) { - RPKI_DEBUG( - "No caches were found in config. Prefix validation is off."); + if (!cache_list || list_isempty(cache_list)) { + RPKI_DEBUG("No caches were found in config." 
+ "Prefix validation is off."); + return ERROR; + } + + if (rpki_vrf->vrfname) + vrf = vrf_lookup_by_name(rpki_vrf->vrfname); + else + vrf = vrf_lookup_by_id(VRF_DEFAULT); + if (!vrf || !CHECK_FLAG(vrf->status, VRF_ACTIVE)) { + RPKI_DEBUG("VRF %s not present or disabled", + rpki_vrf->vrfname); return ERROR; } - RPKI_DEBUG("Init rtr_mgr."); + + RPKI_DEBUG("Init rtr_mgr (%s).", vrf->name); int groups_len = listcount(cache_list); - struct rtr_mgr_group *groups = get_groups(); + struct rtr_mgr_group *groups = get_groups(rpki_vrf->cache_list); - RPKI_DEBUG("Polling period: %d", polling_period); - ret = rtr_mgr_init(&rtr_config, groups, groups_len, polling_period, - expire_interval, retry_interval, - rpki_update_cb_sync_rtr, NULL, NULL, NULL); + RPKI_DEBUG("Polling period: %d", rpki_vrf->polling_period); + ret = rtr_mgr_init(&rpki_vrf->rtr_config, groups, groups_len, + rpki_vrf->polling_period, rpki_vrf->expire_interval, + rpki_vrf->retry_interval, rpki_update_cb_sync_rtr, + NULL, NULL, NULL); if (ret == RTR_ERROR) { - RPKI_DEBUG("Init rtr_mgr failed."); + RPKI_DEBUG("Init rtr_mgr failed (%s).", vrf->name); return ERROR; } - RPKI_DEBUG("Starting rtr_mgr."); - ret = rtr_mgr_start(rtr_config); + RPKI_DEBUG("Starting rtr_mgr (%s).", vrf->name); + ret = rtr_mgr_start(rpki_vrf->rtr_config); if (ret == RTR_ERROR) { - RPKI_DEBUG("Starting rtr_mgr failed."); - rtr_mgr_free(rtr_config); + RPKI_DEBUG("Starting rtr_mgr failed (%s).", vrf->name); + rtr_mgr_free(rpki_vrf->rtr_config); return ERROR; } - rtr_is_running = 1; + rpki_vrf->rtr_is_running = true; XFREE(MTYPE_BGP_RPKI_CACHE_GROUP, groups); return SUCCESS; } -static void stop(void) +static void stop(struct rpki_vrf *rpki_vrf) { - rtr_is_stopping = 1; - if (rtr_is_running) { - rtr_mgr_stop(rtr_config); - rtr_mgr_free(rtr_config); - rtr_is_running = 0; + rpki_vrf->rtr_is_stopping = true; + if (rpki_vrf->rtr_is_running) { + rtr_mgr_stop(rpki_vrf->rtr_config); + rtr_mgr_free(rpki_vrf->rtr_config); + rpki_vrf->rtr_is_running = false; } } -static int reset(bool force) +static int reset(bool force, struct rpki_vrf *rpki_vrf) { - if (rtr_is_running && !force) + if (rpki_vrf->rtr_is_running && !force) return SUCCESS; RPKI_DEBUG("Resetting RPKI Session"); - stop(); - return start(); + stop(rpki_vrf); + return start(rpki_vrf); } -static struct rtr_mgr_group *get_connected_group(void) +static struct rtr_mgr_group *get_connected_group(struct rpki_vrf *rpki_vrf) { + struct list *cache_list; + + if (!rpki_vrf) + return NULL; + cache_list = rpki_vrf->cache_list; if (!cache_list || list_isempty(cache_list)) return NULL; - return rtr_mgr_get_first_group(rtr_config); + return rtr_mgr_get_first_group(rpki_vrf->rtr_config); } -static void print_prefix_table_by_asn(struct vty *vty, as_t as) +static void print_prefix_table_by_asn(struct vty *vty, as_t as, struct rpki_vrf *rpki_vrf) { unsigned int number_of_ipv4_prefixes = 0; unsigned int number_of_ipv6_prefixes = 0; - struct rtr_mgr_group *group = get_connected_group(); + struct rtr_mgr_group *group = get_connected_group(rpki_vrf); struct rpki_for_each_record_arg arg; arg.vty = vty; arg.as = as; + if (!rpki_vrf) + return; + if (!group) { vty_out(vty, "Cannot find a connected group.\n"); return; @@ -667,14 +943,17 @@ static void print_prefix_table_by_asn(struct vty *vty, as_t as) vty_out(vty, "Number of IPv6 Prefixes: %u\n", number_of_ipv6_prefixes); } -static void print_prefix_table(struct vty *vty) +static void print_prefix_table(struct vty *vty, struct rpki_vrf *rpki_vrf) { struct rpki_for_each_record_arg arg; unsigned 
int number_of_ipv4_prefixes = 0; unsigned int number_of_ipv6_prefixes = 0; - struct rtr_mgr_group *group = get_connected_group(); + struct rtr_mgr_group *group; + if (!rpki_vrf) + return; + group = get_connected_group(rpki_vrf); arg.vty = vty; if (!group) @@ -704,8 +983,20 @@ static int rpki_validate_prefix(struct peer *peer, struct attr *attr, enum pfxv_state result; char buf[BUFSIZ]; const char *prefix_string; + struct bgp *bgp = peer->bgp; + struct vrf *vrf; + struct rpki_vrf *rpki_vrf; - if (!is_synchronized()) + if (!bgp) + return 0; + vrf = vrf_lookup_by_id(bgp->vrf_id); + if (!vrf) + return 0; + if (vrf->vrf_id == VRF_DEFAULT) + rpki_vrf = find_rpki_vrf(NULL); + else + rpki_vrf = find_rpki_vrf(vrf->name); + if (!rpki_vrf || !is_synchronized(rpki_vrf)) return 0; // No aspath means route comes from iBGP @@ -750,7 +1041,7 @@ static int rpki_validate_prefix(struct peer *peer, struct attr *attr, } // Do the actual validation - rtr_mgr_validate(rtr_config, as_number, &ip_addr_prefix, + rtr_mgr_validate(rpki_vrf->rtr_config, as_number, &ip_addr_prefix, prefix->prefixlen, &result); // Print Debug output @@ -784,15 +1075,26 @@ static int add_cache(struct cache *cache) { uint8_t preference = cache->preference; struct rtr_mgr_group group; + struct list *cache_list; + struct rpki_vrf *rpki_vrf; + + rpki_vrf = cache->rpki_vrf; + if (!rpki_vrf) + return ERROR; group.preference = preference; group.sockets_len = 1; group.sockets = &cache->rtr_socket; - if (rtr_is_running) { + cache_list = rpki_vrf->cache_list; + if (!cache_list) + return ERROR; + + if (rpki_vrf->rtr_is_running) { init_tr_socket(cache); - if (rtr_mgr_add_group(rtr_config, &group) != RTR_SUCCESS) { + if (rtr_mgr_add_group(rpki_vrf->rtr_config, &group) + != RTR_SUCCESS) { free_tr_socket(cache); return ERROR; } @@ -803,57 +1105,160 @@ static int add_cache(struct cache *cache) return SUCCESS; } -static int add_tcp_cache(const char *host, const char *port, - const uint8_t preference) +static int rpki_create_socket(struct cache *cache) +{ + struct vrf *vrf; + int socket; + struct addrinfo hints; + struct addrinfo *res = NULL; + char *host, *port; + struct rpki_vrf *rpki_vrf = cache->rpki_vrf; + int ret; + + if (rpki_vrf->vrfname == NULL) + vrf = vrf_lookup_by_id(VRF_DEFAULT); + else + vrf = vrf_lookup_by_name(rpki_vrf->vrfname); + if (!vrf) + return 0; + + if (!CHECK_FLAG(vrf->status, VRF_ACTIVE) || + vrf->vrf_id == VRF_UNKNOWN) + return 0; + + bzero(&hints, sizeof(hints)); + + if (cache->type == TCP) { + struct tr_tcp_config *tcp_config; + + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + hints.ai_flags = AI_ADDRCONFIG; + + tcp_config = cache->tr_config.tcp_config; + host = tcp_config->host; + port = tcp_config->port; + } else { + char s_port[10]; + struct tr_ssh_config *ssh_config; + + ssh_config = cache->tr_config.ssh_config; + host = ssh_config->host; + snprintf(s_port, sizeof(s_port), "%hu", + ssh_config->port); + port = s_port; + + hints.ai_flags |= AI_NUMERICHOST; + hints.ai_protocol = IPPROTO_TCP; + hints.ai_family = PF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + } + frr_with_privs(&bgpd_privs) { + ret = vrf_getaddrinfo(host, port, + &hints, &res, vrf->vrf_id); + } + if (ret != 0) { + zlog_err("getaddrinfo error, %u", errno); + return 0; + } + frr_with_privs(&bgpd_privs) { + socket = vrf_socket(res->ai_family, res->ai_socktype, + res->ai_protocol, vrf->vrf_id, NULL); + } + if (socket <= 0) { + zlog_err("vrf socket error, %u", errno); + return 0; + } + + if (connect(socket, res->ai_addr, res->ai_addrlen) == -1) { 
+ zlog_err("Couldn't establish TCP connection, %s", strerror(errno)); + if (res) + freeaddrinfo(res); + return 0; + } + if (res) + freeaddrinfo(res); + return socket; +} + +static int rpki_get_socket(void *_cache) +{ + int sock; + struct cache *cache = (struct cache *)_cache; + + if (!cache) + return -1; + sock = rpki_create_socket(cache); + if (sock <= 0) + return -1; + return sock; +} + +static int add_tcp_cache(struct rpki_vrf *rpki_vrf, const char *host, + const char *port, const uint8_t preference) { struct rtr_socket *rtr_socket; - struct tr_tcp_config *tcp_config = - XCALLOC(MTYPE_BGP_RPKI_CACHE, sizeof(struct tr_tcp_config)); - struct tr_socket *tr_socket = - XMALLOC(MTYPE_BGP_RPKI_CACHE, sizeof(struct tr_socket)); - struct cache *cache = - XMALLOC(MTYPE_BGP_RPKI_CACHE, sizeof(struct cache)); + struct tr_tcp_config *tcp_config; + struct tr_socket *tr_socket; + struct cache *cache; + int ret; + + tcp_config = XCALLOC(MTYPE_BGP_RPKI_CACHE, + sizeof(struct tr_tcp_config)); + tr_socket = XCALLOC(MTYPE_BGP_RPKI_CACHE, sizeof(struct tr_socket)); + cache = XCALLOC(MTYPE_BGP_RPKI_CACHE, sizeof(struct cache)); tcp_config->host = XSTRDUP(MTYPE_BGP_RPKI_CACHE, host); tcp_config->port = XSTRDUP(MTYPE_BGP_RPKI_CACHE, port); tcp_config->bindaddr = NULL; - + tcp_config->data = cache; + tcp_config->new_socket = rpki_get_socket; rtr_socket = create_rtr_socket(tr_socket); + cache->rpki_vrf = rpki_vrf; cache->type = TCP; cache->tr_socket = tr_socket; cache->tr_config.tcp_config = tcp_config; cache->rtr_socket = rtr_socket; cache->preference = preference; - int ret = add_cache(cache); + ret = add_cache(cache); if (ret != SUCCESS) { free_cache(cache); } - return ret; } #if defined(FOUND_SSH) -static int add_ssh_cache(const char *host, const unsigned int port, +static int add_ssh_cache(struct rpki_vrf *rpki_vrf, + const char *host, + const unsigned int port, const char *username, const char *client_privkey_path, const char *client_pubkey_path, const char *server_pubkey_path, const uint8_t preference) { - struct tr_ssh_config *ssh_config = - XCALLOC(MTYPE_BGP_RPKI_CACHE, sizeof(struct tr_ssh_config)); - struct cache *cache = - XMALLOC(MTYPE_BGP_RPKI_CACHE, sizeof(struct cache)); - struct tr_socket *tr_socket = - XMALLOC(MTYPE_BGP_RPKI_CACHE, sizeof(struct tr_socket)); + struct tr_ssh_config *ssh_config; + struct cache *cache; + struct tr_socket *tr_socket; struct rtr_socket *rtr_socket; + int ret; + + ssh_config = XCALLOC(MTYPE_BGP_RPKI_CACHE, + sizeof(struct tr_ssh_config)); + cache = XCALLOC(MTYPE_BGP_RPKI_CACHE, sizeof(struct cache)); + tr_socket = XCALLOC(MTYPE_BGP_RPKI_CACHE, sizeof(struct tr_socket)); ssh_config->port = port; ssh_config->host = XSTRDUP(MTYPE_BGP_RPKI_CACHE, host); ssh_config->bindaddr = NULL; + ssh_config->data = cache; + ssh_config->new_socket = rpki_get_socket; ssh_config->username = XSTRDUP(MTYPE_BGP_RPKI_CACHE, username); + /* public key path is derived from private key path + * by appending '.pub' to the private key name + */ ssh_config->client_privkey_path = XSTRDUP(MTYPE_BGP_RPKI_CACHE, client_privkey_path); ssh_config->server_hostkey_path = @@ -861,13 +1266,14 @@ static int add_ssh_cache(const char *host, const unsigned int port, rtr_socket = create_rtr_socket(tr_socket); + cache->rpki_vrf = rpki_vrf; cache->type = SSH; cache->tr_socket = tr_socket; cache->tr_config.ssh_config = ssh_config; cache->rtr_socket = rtr_socket; cache->preference = preference; - int ret = add_cache(cache); + ret = add_cache(cache); if (ret != SUCCESS) { free_cache(cache); } @@ -900,53 +1306,128 @@ 
static void free_cache(struct cache *cache) XFREE(MTYPE_BGP_RPKI_CACHE, cache); } -static int config_write(struct vty *vty) +/* return true if config changed from default */ +static bool config_changed(struct rpki_vrf *rpki_vrf) { - struct listnode *cache_node; - struct cache *cache; + if (rpki_vrf->cache_list && listcount(rpki_vrf->cache_list)) + return true; + if (rpki_vrf->polling_period != POLLING_PERIOD_DEFAULT) + return true; + if (rpki_vrf->retry_interval != RETRY_INTERVAL_DEFAULT) + return true; + if (rpki_vrf->expire_interval != EXPIRE_INTERVAL_DEFAULT) + return true; + return false; +} - if (listcount(cache_list)) { - if (rpki_debug) - vty_out(vty, "debug rpki\n"); +static int bgp_rpki_write_debug(struct vty *vty, bool running) +{ + if (rpki_debug_conf && running) { + vty_out(vty, "debug rpki\n"); + return 1; + } + if ((rpki_debug_conf || rpki_debug_term) && !running) { + vty_out(vty, " BGP RPKI debugging is on\n"); + return 1; + } + return 0; +} - vty_out(vty, "!\n"); - vty_out(vty, "rpki\n"); - vty_out(vty, " rpki polling_period %d\n", polling_period); - for (ALL_LIST_ELEMENTS_RO(cache_list, cache_node, cache)) { - switch (cache->type) { - struct tr_tcp_config *tcp_config; +static int bgp_rpki_hook_write_vrf(struct vty *vty, struct vrf *vrf) +{ + int ret; + + ret = bgp_rpki_write_vrf(vty, vrf); + if (ret == ERROR) + return 0; + return ret; +} + +static int bgp_rpki_write_vrf(struct vty *vty, struct vrf *vrf) +{ + struct listnode *cache_node; + struct cache *cache; + struct rpki_vrf *rpki_vrf = NULL; + char sep[STR_SEPARATOR]; + vrf_id_t vrf_id = VRF_DEFAULT; + char *host_key_pub = NULL; + int len_host_key_pub; + + if (!vrf) { + rpki_vrf = find_rpki_vrf(NULL); + snprintf(sep, sizeof(sep), "%s", ""); + } else if (vrf->vrf_id != VRF_DEFAULT) { + rpki_vrf = find_rpki_vrf(vrf->name); + snprintf(sep, sizeof(sep), "%s", " "); + vrf_id = vrf->vrf_id; + } else + return ERROR; + if (!rpki_vrf) + return ERROR; + if (!config_changed(rpki_vrf)) + return 0; + if (vrf_id == VRF_DEFAULT) + vty_out(vty, "%s!\n", sep); + vty_out(vty, "%srpki\n", sep); + if (rpki_vrf->polling_period != POLLING_PERIOD_DEFAULT) + vty_out(vty, "%s rpki polling_period %d\n", + sep, rpki_vrf->polling_period); + if (rpki_vrf->retry_interval != RETRY_INTERVAL_DEFAULT) + vty_out(vty, "%s rpki retry-interval %d\n", + sep, rpki_vrf->retry_interval); + if (rpki_vrf->expire_interval != EXPIRE_INTERVAL_DEFAULT) + vty_out(vty, "%s rpki expire_interval %d\n", + sep, rpki_vrf->expire_interval); + + for (ALL_LIST_ELEMENTS_RO(rpki_vrf->cache_list, cache_node, cache)) { + switch (cache->type) { + struct tr_tcp_config *tcp_config; #if defined(FOUND_SSH) - struct tr_ssh_config *ssh_config; + struct tr_ssh_config *ssh_config; #endif - case TCP: - tcp_config = cache->tr_config.tcp_config; - vty_out(vty, " rpki cache %s %s ", - tcp_config->host, tcp_config->port); - break; + case TCP: + tcp_config = cache->tr_config.tcp_config; + vty_out(vty, "%s rpki cache %s %s ", sep, + tcp_config->host, tcp_config->port); + break; #if defined(FOUND_SSH) - case SSH: - ssh_config = cache->tr_config.ssh_config; - vty_out(vty, " rpki cache %s %u %s %s %s ", - ssh_config->host, ssh_config->port, - ssh_config->username, - ssh_config->client_privkey_path, - ssh_config->server_hostkey_path != NULL - ? 
ssh_config - ->server_hostkey_path - : " "); - break; -#endif - default: - break; + case SSH: + ssh_config = cache->tr_config.ssh_config; + if (ssh_config->client_privkey_path) { + len_host_key_pub = strlen(ssh_config->client_privkey_path) + 4 /* strlen(".pub")*/ + 1; + host_key_pub = XCALLOC(MTYPE_BGP_RPKI_CACHE, len_host_key_pub); + snprintf(host_key_pub, len_host_key_pub, "%s.pub", ssh_config->client_privkey_path); } - - vty_out(vty, "preference %hhu\n", cache->preference); + vty_out(vty, "%s rpki cache %s %u %s %s %s %s ", + sep, ssh_config->host, + ssh_config->port, + ssh_config->username, + ssh_config->client_privkey_path, + host_key_pub ? host_key_pub : "", + ssh_config->server_hostkey_path != NULL + ? ssh_config + ->server_hostkey_path + : ""); + if (host_key_pub) { + XFREE(MTYPE_BGP_RPKI_CACHE, host_key_pub); + host_key_pub = NULL; + } + break; +#endif + default: + break; } - vty_out(vty, " exit\n"); - return 1; - } else { - return 0; + + vty_out(vty, "preference %hhu\n", cache->preference); } + vty_out(vty, "%s exit\n%s", sep, + vrf_id == VRF_DEFAULT ? "!\n" : ""); + return 1; +} + +static int config_write(struct vty *vty) +{ + return bgp_rpki_write_vrf(vty, NULL); } DEFUN_NOSH (rpki, @@ -954,22 +1435,85 @@ DEFUN_NOSH (rpki, "rpki", "Enable rpki and enter rpki configuration mode\n") { - vty->node = RPKI_NODE; + struct rpki_vrf *rpki_vrf; + char *vrfname = NULL; + + if (vty->node == CONFIG_NODE) + vty->node = RPKI_NODE; + else { + struct vrf *vrf = VTY_GET_CONTEXT(vrf); + + vty->node = RPKI_VRF_NODE; + if (vrf->vrf_id != VRF_DEFAULT) + vrfname = vrf->name; + } + /* assume default vrf */ + rpki_vrf = find_rpki_vrf(vrfname); + if (!rpki_vrf) { + rpki_vrf = bgp_rpki_allocate(vrfname); + + rpki_init_sync_socket(rpki_vrf); + } + if (vty->node == RPKI_VRF_NODE) + VTY_PUSH_CONTEXT_SUB(vty->node, rpki_vrf); + else + VTY_PUSH_CONTEXT(vty->node, rpki_vrf); + return CMD_SUCCESS; +} + +DEFUN_NOSH (no_rpki, + no_rpki_cmd, + "no rpki", + NO_STR + "Enable rpki and enter rpki configuration mode\n") +{ + struct rpki_vrf *rpki_vrf; + char *vrfname = NULL; + + if (vty->node == VRF_NODE) { + VTY_DECLVAR_CONTEXT(vrf, vrf); + + if (vrf->vrf_id != VRF_DEFAULT) + vrfname = vrf->name; + } + + rpki_vrf = find_rpki_vrf(vrfname); + + if (rpki_vrf) + bgp_rpki_finish(rpki_vrf); return CMD_SUCCESS; } DEFUN (bgp_rpki_start, bgp_rpki_start_cmd, - "rpki start", + "rpki start [vrf NAME]", RPKI_OUTPUT_STRING - "start rpki support\n") -{ - if (listcount(cache_list) == 0) - vty_out(vty, - "Could not start rpki because no caches are configured\n"); + "start rpki support\n" + VRF_CMD_HELP_STR) +{ + struct list *cache_list = NULL; + struct rpki_vrf *rpki_vrf; + int idx_vrf = 3; + struct vrf *vrf; + char *vrfname = NULL; + + if (argc == 4) { + vrf = vrf_lookup_by_name(argv[idx_vrf]->arg); + if (!vrf) + return CMD_SUCCESS; + if (vrf->vrf_id != VRF_DEFAULT) + vrfname = vrf->name; + } + rpki_vrf = find_rpki_vrf(vrfname); + if (!rpki_vrf) + return CMD_SUCCESS; + cache_list = rpki_vrf->cache_list; + if (!cache_list || listcount(cache_list) == 0) + vty_out(vty, "Could not start rpki" + " because no caches are configured\n"); - if (!is_running()) { - if (start() == ERROR) { + if (!is_running(rpki_vrf)) { + if (start(rpki_vrf) == ERROR) { RPKI_DEBUG("RPKI failed to start"); return CMD_WARNING; } @@ -979,12 +1523,26 @@ DEFUN (bgp_rpki_start, DEFUN (bgp_rpki_stop, bgp_rpki_stop_cmd, - "rpki stop", + "rpki stop [vrf NAME]", RPKI_OUTPUT_STRING - "start rpki support\n") + "start rpki support\n" + VRF_CMD_HELP_STR) { - if 
(is_running()) - stop(); + int idx_vrf = 3; + struct vrf *vrf; + char *vrfname = NULL; + struct rpki_vrf *rpki_vrf; + + if (argc == 4) { + vrf = vrf_lookup_by_name(argv[idx_vrf]->arg); + if (!vrf) + return CMD_SUCCESS; + if (vrf->vrf_id != VRF_DEFAULT) + vrfname = vrf->name; + } + rpki_vrf = find_rpki_vrf(vrfname); + if (rpki_vrf && is_running(rpki_vrf)) + stop(rpki_vrf); return CMD_SUCCESS; } @@ -996,7 +1554,14 @@ DEFPY (rpki_polling_period, "Set polling period\n" "Polling period value\n") { - polling_period = pp; + struct rpki_vrf *rpki_vrf; + + if (vty->node == RPKI_VRF_NODE) + rpki_vrf = VTY_GET_CONTEXT_SUB(rpki_vrf); + else + rpki_vrf = VTY_GET_CONTEXT(rpki_vrf); + + rpki_vrf->polling_period = pp; return CMD_SUCCESS; } @@ -1007,7 +1572,14 @@ DEFUN (no_rpki_polling_period, RPKI_OUTPUT_STRING "Set polling period back to default\n") { - polling_period = POLLING_PERIOD_DEFAULT; + struct rpki_vrf *rpki_vrf; + + if (vty->node == RPKI_VRF_NODE) + rpki_vrf = VTY_GET_CONTEXT_SUB(rpki_vrf); + else + rpki_vrf = VTY_GET_CONTEXT(rpki_vrf); + + rpki_vrf->polling_period = POLLING_PERIOD_DEFAULT; return CMD_SUCCESS; } @@ -1018,8 +1590,15 @@ DEFPY (rpki_expire_interval, "Set expire interval\n" "Expire interval value\n") { - if ((unsigned int)tmp >= polling_period) { - expire_interval = tmp; + struct rpki_vrf *rpki_vrf; + + if (vty->node == RPKI_VRF_NODE) + rpki_vrf = VTY_GET_CONTEXT_SUB(rpki_vrf); + else + rpki_vrf = VTY_GET_CONTEXT(rpki_vrf); + + if ((unsigned int)tmp >= rpki_vrf->polling_period) { + rpki_vrf->expire_interval = tmp; return CMD_SUCCESS; } @@ -1034,7 +1613,14 @@ DEFUN (no_rpki_expire_interval, RPKI_OUTPUT_STRING "Set expire interval back to default\n") { - expire_interval = polling_period * 2; + struct rpki_vrf *rpki_vrf; + + if (vty->node == RPKI_VRF_NODE) + rpki_vrf = VTY_GET_CONTEXT_SUB(rpki_vrf); + else + rpki_vrf = VTY_GET_CONTEXT(rpki_vrf); + + rpki_vrf->expire_interval = rpki_vrf->polling_period * 2; return CMD_SUCCESS; } @@ -1045,7 +1631,14 @@ DEFPY (rpki_retry_interval, "Set retry interval\n" "retry interval value\n") { - retry_interval = tmp; + struct rpki_vrf *rpki_vrf; + + if (vty->node == RPKI_VRF_NODE) + rpki_vrf = VTY_GET_CONTEXT_SUB(rpki_vrf); + else + rpki_vrf = VTY_GET_CONTEXT(rpki_vrf); + + rpki_vrf->retry_interval = tmp; return CMD_SUCCESS; } @@ -1056,7 +1649,14 @@ DEFUN (no_rpki_retry_interval, RPKI_OUTPUT_STRING "Set retry interval back to default\n") { - retry_interval = RETRY_INTERVAL_DEFAULT; + struct rpki_vrf *rpki_vrf; + + if (vty->node == RPKI_VRF_NODE) + rpki_vrf = VTY_GET_CONTEXT_SUB(rpki_vrf); + else + rpki_vrf = VTY_GET_CONTEXT(rpki_vrf); + + rpki_vrf->retry_interval = RETRY_INTERVAL_DEFAULT; return CMD_SUCCESS; } @@ -1141,23 +1741,45 @@ DEFPY (rpki_cache, int return_value; struct listnode *cache_node; struct cache *current_cache; + char *pub = NULL; + struct rpki_vrf *rpki_vrf; - for (ALL_LIST_ELEMENTS_RO(cache_list, cache_node, current_cache)) { + if (vty->node == RPKI_VRF_NODE) + rpki_vrf = VTY_GET_CONTEXT_SUB(rpki_vrf); + else + rpki_vrf = VTY_GET_CONTEXT(rpki_vrf); + + if (!rpki_vrf->cache_list) + return CMD_WARNING; + for (ALL_LIST_ELEMENTS_RO(rpki_vrf->cache_list, cache_node, + current_cache)) { if (current_cache->preference == preference) { - vty_out(vty, - "Cache with preference %ld is already configured\n", + vty_out(vty, "Cache with preference %ld " + "is already configured\n", preference); return CMD_WARNING; } } - // use ssh connection if (ssh_uname) { #if defined(FOUND_SSH) + if (ssh_privkey && ssh_pubkey) { + pub = 
XCALLOC(MTYPE_BGP_RPKI_CACHE, + strlen(ssh_privkey) + 5); + snprintf(pub, strlen(ssh_privkey) + 5, "%s.pub", + ssh_privkey); + if (!strmatch(pub, ssh_pubkey)) { + vty_out(vty, + "ssh public key overriden: %s.pub\n", + ssh_privkey); + } + } return_value = - add_ssh_cache(cache, sshport, ssh_uname, ssh_privkey, - ssh_pubkey, server_pubkey, preference); + add_ssh_cache(rpki_vrf, cache, sshport, ssh_uname, ssh_privkey, + pub, server_pubkey, preference); + if (pub) + XFREE(MTYPE_BGP_RPKI_CACHE, pub); #else return_value = SUCCESS; vty_out(vty, @@ -1166,7 +1788,8 @@ DEFPY (rpki_cache, "If you want to use it\n"); #endif } else { // use tcp connection - return_value = add_tcp_cache(cache, tcpport, preference); + return_value = add_tcp_cache(rpki_vrf, cache, tcpport, + preference); } if (return_value == ERROR) { @@ -1189,17 +1812,27 @@ DEFPY (no_rpki_cache, "Preference of the cache server\n" "Preference value\n") { - struct cache *cache_p = find_cache(preference); + struct cache *cache_p; + struct list *cache_list = NULL; + struct rpki_vrf *rpki_vrf; - if (!cache_p) { + if (vty->node == RPKI_VRF_NODE) + rpki_vrf = VTY_GET_CONTEXT_SUB(rpki_vrf); + else + rpki_vrf = VTY_GET_CONTEXT(rpki_vrf); + + cache_list = rpki_vrf->cache_list; + cache_p = find_cache(preference, cache_list); + if (!rpki_vrf || !cache_p) { vty_out(vty, "Could not find cache %ld\n", preference); return CMD_WARNING; } - if (rtr_is_running && listcount(cache_list) == 1) { - stop(); - } else if (rtr_is_running) { - if (rtr_mgr_remove_group(rtr_config, preference) == RTR_ERROR) { + if (rpki_vrf->rtr_is_running && listcount(rpki_vrf->cache_list) == 1) { + stop(rpki_vrf); + } else if (rpki_vrf->rtr_is_running) { + if (rtr_mgr_remove_group(rpki_vrf->rtr_config, preference) + == RTR_ERROR) { vty_out(vty, "Could not remove cache %ld", preference); vty_out(vty, "\n"); @@ -1215,21 +1848,42 @@ DEFPY (no_rpki_cache, DEFUN (show_rpki_prefix_table, show_rpki_prefix_table_cmd, - "show rpki prefix-table", + "show rpki prefix-table [vrf NAME]", SHOW_STR RPKI_OUTPUT_STRING - "Show validated prefixes which were received from RPKI Cache\n") + "Show validated prefixes which were received from RPKI Cache\n" + VRF_CMD_HELP_STR) { struct listnode *cache_node; struct cache *cache; + struct rpki_vrf *rpki_vrf; + int idx_vrf = 4; + struct vrf *vrf; + char *vrfname = NULL; + + if (argc == 5) { + vrf = vrf_lookup_by_name(argv[idx_vrf]->arg); + if (!vrf) + return CMD_SUCCESS; + if (vrf->vrf_id != VRF_DEFAULT) + vrfname = vrf->name; + } - for (ALL_LIST_ELEMENTS_RO(cache_list, cache_node, cache)) { - vty_out(vty, "host: %s port: %s\n", - cache->tr_config.tcp_config->host, - cache->tr_config.tcp_config->port); + rpki_vrf = find_rpki_vrf(vrfname); + if (!rpki_vrf) + return CMD_SUCCESS; + for (ALL_LIST_ELEMENTS_RO(rpki_vrf->cache_list, cache_node, cache)) { + if (cache->type == TCP) + vty_out(vty, "host: %s port: %s\n", + cache->tr_config.tcp_config->host, + cache->tr_config.tcp_config->port); + else + vty_out(vty, "host: %s port: %u SSH\n", + cache->tr_config.ssh_config->host, + cache->tr_config.ssh_config->port); } - if (is_synchronized()) - print_prefix_table(vty); + if (is_synchronized(rpki_vrf)) + print_prefix_table(vty, rpki_vrf); else vty_out(vty, "No connection to RPKI cache server.\n"); @@ -1237,32 +1891,58 @@ DEFUN (show_rpki_prefix_table, } DEFPY(show_rpki_as_number, show_rpki_as_number_cmd, - "show rpki as-number (1-4294967295)$by_asn", + "show rpki as-number (1-4294967295)$by_asn [vrf NAME$vrfname]", SHOW_STR RPKI_OUTPUT_STRING "Lookup by ASN in prefix 
table\n" "AS Number\n") { - if (!is_synchronized()) { + struct rpki_vrf *rpki_vrf; + char *vrf_name = NULL; + struct vrf *vrf; + + if (vrfname && !strmatch(vrfname, VRF_DEFAULT_NAME)) { + vrf = vrf_lookup_by_name(vrfname); + if (!vrf) + return CMD_SUCCESS; + vrf_name = vrf->name; + } + /* assume default vrf */ + rpki_vrf = find_rpki_vrf(vrf_name); + + if (!is_synchronized(rpki_vrf)) { vty_out(vty, "No Connection to RPKI cache server.\n"); return CMD_WARNING; } - print_prefix_table_by_asn(vty, by_asn); + print_prefix_table_by_asn(vty, by_asn, rpki_vrf); return CMD_SUCCESS; } DEFPY (show_rpki_prefix, show_rpki_prefix_cmd, - "show rpki prefix <A.B.C.D/M|X:X::X:X/M> [(1-4294967295)$asn]", + "show rpki prefix <A.B.C.D/M|X:X::X:X/M> [(1-4294967295)$asn] [vrf NAME$vrfname]", SHOW_STR RPKI_OUTPUT_STRING "Lookup IP prefix and optionally ASN in prefix table\n" "IPv4 prefix\n" "IPv6 prefix\n" - "AS Number\n") + "AS Number\n" + VRF_CMD_HELP_STR) { + struct rpki_vrf *rpki_vrf; + struct vrf *vrf; + char *vrf_name = NULL; - if (!is_synchronized()) { + if (vrfname && !strmatch(vrfname, VRF_DEFAULT_NAME)) { + vrf = vrf_lookup_by_name(vrfname); + if (!vrf) + return CMD_SUCCESS; + vrf_name = vrf->name; + } + + rpki_vrf = find_rpki_vrf(vrf_name); + + if (!rpki_vrf || !is_synchronized(rpki_vrf)) { vty_out(vty, "No Connection to RPKI cache server.\n"); return CMD_WARNING; } @@ -1283,8 +1963,9 @@ DEFPY (show_rpki_prefix, unsigned int match_count = 0; enum pfxv_state result; - if (pfx_table_validate_r(rtr_config->pfx_table, &matches, &match_count, - asn, &addr, prefix->prefixlen, &result) + if (pfx_table_validate_r(rpki_vrf->rtr_config->pfx_table, &matches, + &match_count, asn, &addr, + prefix->prefixlen, &result) != PFX_SUCCESS) { vty_out(vty, "Prefix lookup failed"); return CMD_WARNING; @@ -1306,15 +1987,32 @@ DEFPY (show_rpki_prefix, DEFUN (show_rpki_cache_server, show_rpki_cache_server_cmd, - "show rpki cache-server", + "show rpki cache-server [vrf NAME]", SHOW_STR RPKI_OUTPUT_STRING - "SHOW configured cache server\n") + "SHOW configured cache server\n" + VRF_CMD_HELP_STR) { struct listnode *cache_node; struct cache *cache; + struct rpki_vrf *rpki_vrf; + int idx_vrf = 4; + struct vrf *vrf; + char *vrfname = NULL; + + if (argc == 5) { + vrf = vrf_lookup_by_name(argv[idx_vrf]->arg); + if (!vrf) + return CMD_SUCCESS; + if (vrf->vrf_id != VRF_DEFAULT) + vrfname = vrf->name; + } - for (ALL_LIST_ELEMENTS_RO(cache_list, cache_node, cache)) { + rpki_vrf = find_rpki_vrf(vrfname); + if (!rpki_vrf) + return CMD_SUCCESS; + + for (ALL_LIST_ELEMENTS_RO(rpki_vrf->cache_list, cache_node, cache)) { if (cache->type == TCP) { vty_out(vty, "host: %s port: %s\n", cache->tr_config.tcp_config->host, @@ -1341,22 +2039,41 @@ DEFUN (show_rpki_cache_server, DEFUN (show_rpki_cache_connection, show_rpki_cache_connection_cmd, - "show rpki cache-connection", + "show rpki cache-connection [vrf NAME]", SHOW_STR RPKI_OUTPUT_STRING - "Show to which RPKI Cache Servers we have a connection\n") + "Show to which RPKI Cache Servers we have a connection\n" + VRF_CMD_HELP_STR) { - if (is_synchronized()) { + struct rpki_vrf *rpki_vrf; + int idx_vrf = 4; + struct vrf *vrf; + char *vrfname = NULL; + + if (argc == 5) { + vrf = vrf_lookup_by_name(argv[idx_vrf]->arg); + if (!vrf) + return CMD_SUCCESS; + if (vrf->vrf_id != VRF_DEFAULT) + vrfname = vrf->name; + } + + rpki_vrf = find_rpki_vrf(vrfname); + if (!rpki_vrf) + return CMD_SUCCESS; + + if (is_synchronized(rpki_vrf)) { struct listnode *cache_node; struct cache *cache; - struct rtr_mgr_group *group = 
get_connected_group(); + struct rtr_mgr_group *group = get_connected_group(rpki_vrf); - if (!group) { + if (!group || !rpki_vrf->cache_list) { vty_out(vty, "Cannot find a connected group.\n"); return CMD_SUCCESS; } vty_out(vty, "Connected to group %d\n", group->preference); - for (ALL_LIST_ELEMENTS_RO(cache_list, cache_node, cache)) { + for (ALL_LIST_ELEMENTS_RO(rpki_vrf->cache_list, + cache_node, cache)) { if (cache->preference == group->preference) { struct tr_tcp_config *tcp_config; #if defined(FOUND_SSH) @@ -1398,9 +2115,52 @@ DEFUN (show_rpki_cache_connection, return CMD_SUCCESS; } +DEFUN (show_rpki_configuration, + show_rpki_configuration_cmd, + "show rpki configuration [vrf NAME]", + SHOW_STR + RPKI_OUTPUT_STRING + "Show RPKI configuration\n" + VRF_CMD_HELP_STR) +{ + struct rpki_vrf *rpki_vrf; + int idx_vrf = 4; + struct vrf *vrf; + char *vrfname = NULL; + + if (argc == 5) { + vrf = vrf_lookup_by_name(argv[idx_vrf]->arg); + if (!vrf) + return CMD_SUCCESS; + if (vrf->vrf_id != VRF_DEFAULT) + vrfname = vrf->name; + } + + rpki_vrf = find_rpki_vrf(vrfname); + if (!rpki_vrf) + return CMD_SUCCESS; + vty_out(vty, "rpki is %s", + listcount(rpki_vrf->cache_list) ? "Enabled" : "Disabled"); + if (!listcount(rpki_vrf->cache_list)) + return CMD_SUCCESS; + vty_out(vty, " (%d cache servers configured)", + listcount(rpki_vrf->cache_list)); + vty_out(vty, "\n"); + vty_out(vty, "\tpolling period %d\n", rpki_vrf->polling_period); + vty_out(vty, "\tretry interval %d\n", rpki_vrf->retry_interval); + vty_out(vty, "\texpire interval %d\n", rpki_vrf->expire_interval); + return CMD_SUCCESS; +} + static int config_on_exit(struct vty *vty) { - reset(false); + struct rpki_vrf *rpki_vrf; + + if (vty->node == RPKI_VRF_NODE) + rpki_vrf = VTY_GET_CONTEXT_SUB(rpki_vrf); + else + rpki_vrf = VTY_GET_CONTEXT(rpki_vrf); + reset(false, rpki_vrf); return 1; } @@ -1410,7 +2170,13 @@ DEFUN (rpki_reset, RPKI_OUTPUT_STRING "reset rpki\n") { - return reset(true) == SUCCESS ? CMD_SUCCESS : CMD_WARNING; + struct rpki_vrf *rpki_vrf; + + if (vty->node == RPKI_VRF_NODE) + rpki_vrf = VTY_GET_CONTEXT_SUB(rpki_vrf); + else + rpki_vrf = VTY_GET_CONTEXT(rpki_vrf); + return reset(true, rpki_vrf) == SUCCESS ? 
CMD_SUCCESS : CMD_WARNING; } DEFUN (debug_rpki, @@ -1419,7 +2185,10 @@ DEFUN (debug_rpki, DEBUG_STR "Enable debugging for rpki\n") { - rpki_debug = 1; + if (vty->node == CONFIG_NODE) + rpki_debug_conf = 1; + else + rpki_debug_term = 1; return CMD_SUCCESS; } @@ -1430,7 +2199,10 @@ DEFUN (no_debug_rpki, DEBUG_STR "Disable debugging for rpki\n") { - rpki_debug = 0; + if (vty->node == CONFIG_NODE) + rpki_debug_conf = 0; + else + rpki_debug_term = 0; return CMD_SUCCESS; } @@ -1500,8 +2272,10 @@ static void install_cli_commands(void) // TODO: make config write work install_node(&rpki_node); install_default(RPKI_NODE); + install_node(&rpki_vrf_node); + install_default(RPKI_VRF_NODE); install_element(CONFIG_NODE, &rpki_cmd); - install_element(ENABLE_NODE, &rpki_cmd); + install_element(CONFIG_NODE, &no_rpki_cmd); install_element(ENABLE_NODE, &bgp_rpki_start_cmd); install_element(ENABLE_NODE, &bgp_rpki_stop_cmd); @@ -1533,12 +2307,43 @@ static void install_cli_commands(void) install_element(RPKI_NODE, &rpki_cache_cmd); install_element(RPKI_NODE, &no_rpki_cache_cmd); + /* RPKI_VRF_NODE commands */ + install_element(VRF_NODE, &rpki_cmd); + install_element(VRF_NODE, &no_rpki_cmd); + /* Install rpki reset command */ + install_element(RPKI_VRF_NODE, &rpki_reset_cmd); + + /* Install rpki polling period commands */ + install_element(RPKI_VRF_NODE, &rpki_polling_period_cmd); + install_element(RPKI_VRF_NODE, &no_rpki_polling_period_cmd); + + /* Install rpki expire interval commands */ + install_element(RPKI_VRF_NODE, &rpki_expire_interval_cmd); + install_element(RPKI_VRF_NODE, &no_rpki_expire_interval_cmd); + + /* Install rpki retry interval commands */ + install_element(RPKI_VRF_NODE, &rpki_retry_interval_cmd); + install_element(RPKI_VRF_NODE, &no_rpki_retry_interval_cmd); + + /* Install rpki timeout commands */ + install_element(RPKI_VRF_NODE, &rpki_timeout_cmd); + install_element(RPKI_VRF_NODE, &no_rpki_timeout_cmd); + + /* Install rpki synchronisation timeout commands */ + install_element(RPKI_VRF_NODE, &rpki_synchronisation_timeout_cmd); + install_element(RPKI_VRF_NODE, &no_rpki_synchronisation_timeout_cmd); + + /* Install rpki cache commands */ + install_element(RPKI_VRF_NODE, &rpki_cache_cmd); + install_element(RPKI_VRF_NODE, &no_rpki_cache_cmd); + /* Install show commands */ install_element(VIEW_NODE, &show_rpki_prefix_table_cmd); install_element(VIEW_NODE, &show_rpki_cache_connection_cmd); install_element(VIEW_NODE, &show_rpki_cache_server_cmd); install_element(VIEW_NODE, &show_rpki_prefix_cmd); install_element(VIEW_NODE, &show_rpki_as_number_cmd); + install_element(VIEW_NODE, &show_rpki_configuration_cmd); /* Install debug commands */ install_element(CONFIG_NODE, &debug_rpki_cmd); diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index 449dab12b0..fe52b73438 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -84,6 +84,10 @@ FRR_CFG_DEFAULT_BOOL(BGP_SHOW_HOSTNAME, { .val_bool = true, .match_profile = "datacenter", }, { .val_bool = false }, ) +FRR_CFG_DEFAULT_BOOL(BGP_SHOW_NEXTHOP_HOSTNAME, + { .val_bool = true, .match_profile = "datacenter", }, + { .val_bool = false }, +) FRR_CFG_DEFAULT_BOOL(BGP_LOG_NEIGHBOR_CHANGES, { .val_bool = true, .match_profile = "datacenter", }, { .val_bool = false }, @@ -422,6 +426,8 @@ int bgp_get_vty(struct bgp **bgp, as_t *as, const char *name, SET_FLAG((*bgp)->flags, BGP_FLAG_IMPORT_CHECK); if (DFLT_BGP_SHOW_HOSTNAME) SET_FLAG((*bgp)->flags, BGP_FLAG_SHOW_HOSTNAME); + if (DFLT_BGP_SHOW_NEXTHOP_HOSTNAME) + SET_FLAG((*bgp)->flags, BGP_FLAG_SHOW_NEXTHOP_HOSTNAME); if 
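The debug handlers above replace the single rpki_debug flag with two flags, one set from the configuration node and one set from a terminal session. A small illustrative sketch of why that split matters (hypothetical names, standard C only, not the actual FRR vty plumbing): only the configured flag should be written back out as configuration, while either flag can drive runtime logging.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical illustration: separate "configured" vs "terminal" debug state */
static bool debug_conf; /* set from the config file / config node */
static bool debug_term; /* set interactively from an enable/view session */

static bool debug_enabled(void)
{
	/* debugging is active if either source turned it on */
	return debug_conf || debug_term;
}

/* Only the configured flag is emitted when writing the configuration. */
static int config_write_debug(FILE *out)
{
	if (!debug_conf)
		return 0;
	fprintf(out, "debug rpki\n");
	return 1;
}

int main(void)
{
	debug_term = true;                          /* toggled at the vty */
	printf("active: %d\n", debug_enabled());    /* 1 */
	printf("written: %d\n", config_write_debug(stdout)); /* 0, nothing emitted */

	debug_conf = true;                          /* present in the config */
	config_write_debug(stdout);                 /* emits "debug rpki" */
	return 0;
}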
(DFLT_BGP_LOG_NEIGHBOR_CHANGES) SET_FLAG((*bgp)->flags, BGP_FLAG_LOG_NEIGHBOR_CHANGES); if (DFLT_BGP_DETERMINISTIC_MED) @@ -3100,6 +3106,32 @@ DEFUN (no_bgp_default_show_hostname, return CMD_SUCCESS; } +/* Display hostname in certain command outputs */ +DEFUN (bgp_default_show_nexthop_hostname, + bgp_default_show_nexthop_hostname_cmd, + "bgp default show-nexthop-hostname", + "BGP specific commands\n" + "Configure BGP defaults\n" + "Show hostname for nexthop in certain command outputs\n") +{ + VTY_DECLVAR_CONTEXT(bgp, bgp); + SET_FLAG(bgp->flags, BGP_FLAG_SHOW_NEXTHOP_HOSTNAME); + return CMD_SUCCESS; +} + +DEFUN (no_bgp_default_show_nexthop_hostname, + no_bgp_default_show_nexthop_hostname_cmd, + "no bgp default show-nexthop-hostname", + NO_STR + "BGP specific commands\n" + "Configure BGP defaults\n" + "Show hostname for nexthop in certain command outputs\n") +{ + VTY_DECLVAR_CONTEXT(bgp, bgp); + UNSET_FLAG(bgp->flags, BGP_FLAG_SHOW_NEXTHOP_HOSTNAME); + return CMD_SUCCESS; +} + /* "bgp network import-check" configuration. */ DEFUN (bgp_network_import_check, bgp_network_import_check_cmd, @@ -8861,7 +8893,8 @@ static void bgp_show_failed_summary(struct vty *vty, struct bgp *bgp, /* Show BGP peer's summary information. */ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, - bool show_failed, bool use_json) + bool show_failed, bool show_established, + bool use_json) { struct peer *peer; struct listnode *node, *nnode; @@ -9194,6 +9227,10 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, bgp_show_failed_summary(vty, bgp, peer, json_peer, 0, use_json); } else if (!show_failed) { + if (show_established + && bgp_has_peer_failed(peer, afi, safi)) + continue; + json_peer = json_object_new_object(); if (peer_dynamic_neighbor(peer)) { json_object_boolean_true_add(json_peer, @@ -9283,6 +9320,10 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, max_neighbor_width, use_json); } else if (!show_failed) { + if (show_established + && bgp_has_peer_failed(peer, afi, safi)) + continue; + memset(dn_flag, '\0', sizeof(dn_flag)); if (peer_dynamic_neighbor(peer)) { dn_flag[0] = '*'; @@ -9405,7 +9446,8 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, } static void bgp_show_summary_afi_safi(struct vty *vty, struct bgp *bgp, int afi, - int safi, bool show_failed, bool use_json) + int safi, bool show_failed, + bool show_established, bool use_json) { int is_first = 1; int afi_wildcard = (afi == AFI_MAX); @@ -9448,7 +9490,8 @@ static void bgp_show_summary_afi_safi(struct vty *vty, struct bgp *bgp, int afi, false)); } } - bgp_show_summary(vty, bgp, afi, safi, show_failed, + bgp_show_summary(vty, bgp, afi, safi, + show_failed, show_established, use_json); } safi++; @@ -9472,6 +9515,7 @@ static void bgp_show_summary_afi_safi(struct vty *vty, struct bgp *bgp, int afi, static void bgp_show_all_instances_summary_vty(struct vty *vty, afi_t afi, safi_t safi, bool show_failed, + bool show_established, bool use_json) { struct listnode *node, *nnode; @@ -9501,7 +9545,7 @@ static void bgp_show_all_instances_summary_vty(struct vty *vty, afi_t afi, : bgp->name); } bgp_show_summary_afi_safi(vty, bgp, afi, safi, show_failed, - use_json); + show_established, use_json); } if (use_json) @@ -9511,15 +9555,16 @@ static void bgp_show_all_instances_summary_vty(struct vty *vty, afi_t afi, } int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi, - safi_t safi, bool show_failed, bool use_json) + safi_t 
safi, bool show_failed, bool show_established, + bool use_json) { struct bgp *bgp; if (name) { if (strmatch(name, "all")) { - bgp_show_all_instances_summary_vty(vty, afi, safi, - show_failed, - use_json); + bgp_show_all_instances_summary_vty( + vty, afi, safi, show_failed, show_established, + use_json); return CMD_SUCCESS; } else { bgp = bgp_lookup_by_name(name); @@ -9534,7 +9579,8 @@ int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi, } bgp_show_summary_afi_safi(vty, bgp, afi, safi, - show_failed, use_json); + show_failed, show_established, + use_json); return CMD_SUCCESS; } } @@ -9543,7 +9589,7 @@ int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi, if (bgp) bgp_show_summary_afi_safi(vty, bgp, afi, safi, show_failed, - use_json); + show_established, use_json); else { if (use_json) vty_out(vty, "{}\n"); @@ -9558,7 +9604,7 @@ int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi, /* `show [ip] bgp summary' commands. */ DEFUN (show_ip_bgp_summary, show_ip_bgp_summary_cmd, - "show [ip] bgp [<view|vrf> VIEWVRFNAME] ["BGP_AFI_CMD_STR" ["BGP_SAFI_WITH_LABEL_CMD_STR"]] summary [failed] [json]", + "show [ip] bgp [<view|vrf> VIEWVRFNAME] ["BGP_AFI_CMD_STR" ["BGP_SAFI_WITH_LABEL_CMD_STR"]] summary [established|failed] [json]", SHOW_STR IP_STR BGP_STR @@ -9566,6 +9612,7 @@ DEFUN (show_ip_bgp_summary, BGP_AFI_HELP_STR BGP_SAFI_WITH_LABEL_HELP_STR "Summary of BGP neighbor status\n" + "Show only sessions in Established state\n" "Show only sessions not in Established state\n" JSON_STR) { @@ -9573,6 +9620,7 @@ DEFUN (show_ip_bgp_summary, afi_t afi = AFI_MAX; safi_t safi = SAFI_MAX; bool show_failed = false; + bool show_established = false; int idx = 0; @@ -9594,10 +9642,13 @@ DEFUN (show_ip_bgp_summary, if (argv_find(argv, argc, "failed", &idx)) show_failed = true; + if (argv_find(argv, argc, "established", &idx)) + show_established = true; bool uj = use_json(argc, argv); - return bgp_show_summary_vty(vty, vrf, afi, safi, show_failed, uj); + return bgp_show_summary_vty(vty, vrf, afi, safi, show_failed, + show_established, uj); } const char *get_afi_safi_str(afi_t afi, safi_t safi, bool for_json) @@ -15261,6 +15312,15 @@ int bgp_config_write(struct vty *vty) ? "" : "no "); + /* BGP default show-nexthop-hostname */ + if (!!CHECK_FLAG(bgp->flags, BGP_FLAG_SHOW_NEXTHOP_HOSTNAME) + != SAVE_BGP_SHOW_HOSTNAME) + vty_out(vty, " %sbgp default show-nexthop-hostname\n", + CHECK_FLAG(bgp->flags, + BGP_FLAG_SHOW_NEXTHOP_HOSTNAME) + ? "" + : "no "); + /* BGP default subgroup-pkt-queue-max. */ if (bgp->default_subgroup_pkt_queue_max != BGP_DEFAULT_SUBGROUP_PKT_QUEUE_MAX) @@ -15891,6 +15951,10 @@ void bgp_vty_init(void) install_element(BGP_NODE, &bgp_default_show_hostname_cmd); install_element(BGP_NODE, &no_bgp_default_show_hostname_cmd); + /* bgp default show-nexthop-hostname */ + install_element(BGP_NODE, &bgp_default_show_nexthop_hostname_cmd); + install_element(BGP_NODE, &no_bgp_default_show_nexthop_hostname_cmd); + /* "bgp default subgroup-pkt-queue-max" commands. 
*/ install_element(BGP_NODE, &bgp_default_subgroup_pkt_queue_max_cmd); install_element(BGP_NODE, &no_bgp_default_subgroup_pkt_queue_max_cmd); diff --git a/bgpd/bgp_vty.h b/bgpd/bgp_vty.h index d6ca198d09..95eefbc36f 100644 --- a/bgpd/bgp_vty.h +++ b/bgpd/bgp_vty.h @@ -178,6 +178,7 @@ extern int bgp_vty_find_and_parse_afi_safi_bgp(struct vty *vty, int bgp_vty_find_and_parse_bgp(struct vty *vty, struct cmd_token **argv, int argc, struct bgp **bgp, bool use_json); extern int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi, - safi_t safi, bool show_failed, bool use_json); + safi_t safi, bool show_failed, + bool show_established, bool use_json); #endif /* _QUAGGA_BGP_VTY_H */ diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index 4a5772a53b..b71f7c6ce2 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -43,6 +43,9 @@ #include "bgp_labelpool.h" #include "bgp_addpath_types.h" +DECLARE_HOOK(bgp_hook_config_write_vrf, (struct vty *vty, struct vrf *vrf), + (vty, vrf)) + #define BGP_MAX_HOSTNAME 64 /* Linux max, is larger than most other sys */ #define BGP_PEER_MAX_HASH_SIZE 16384 @@ -447,6 +450,7 @@ struct bgp { #define BGP_FLAG_SELECT_DEFER_DISABLE (1 << 23) #define BGP_FLAG_GR_DISABLE_EOR (1 << 24) #define BGP_FLAG_EBGP_REQUIRES_POLICY (1 << 25) +#define BGP_FLAG_SHOW_NEXTHOP_HOSTNAME (1 << 26) enum global_mode GLOBAL_GR_FSM[BGP_GLOBAL_GR_MODE] [BGP_GLOBAL_GR_EVENT_CMD]; @@ -677,6 +681,8 @@ DECLARE_HOOK(bgp_inst_delete, (struct bgp *bgp), (bgp)) DECLARE_HOOK(bgp_inst_config_write, (struct bgp *bgp, struct vty *vty), (bgp, vty)) +DECLARE_HOOK(bgp_hook_vrf_update, (struct vrf *vrf, bool enabled), + (vrf, enabled)) /* Thread callback information */ struct afi_safi_info { diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst index 388d479bab..bb968735b9 100644 --- a/doc/user/bgp.rst +++ b/doc/user/bgp.rst @@ -1366,6 +1366,19 @@ Configuring Peers on by default or not. This command defaults to on and is not displayed. The `no bgp default ipv4-unicast` form of the command is displayed. +.. index:: [no] bgp default show-hostname +.. clicmd:: [no] bgp default show-hostname + + This command shows the hostname of the peer in the output of certain BGP + commands. It's easier to troubleshoot if you have a number of BGP peers. + +.. index:: [no] bgp default show-nexthop-hostname +.. clicmd:: [no] bgp default show-nexthop-hostname + + This command shows the hostname of the next-hop in the output of certain BGP + commands. It's easier to troubleshoot if you have a number of BGP peers + and a number of routes to check. + .. index:: [no] neighbor PEER advertisement-interval (0-600) .. clicmd:: [no] neighbor PEER advertisement-interval (0-600) @@ -2731,6 +2744,12 @@ structure is extended with :clicmd:`show bgp [afi] [safi]`. Show a bgp peer summary for peers that are not succesfully exchanging routes for the specified address family, and subsequent address-family. +.. index:: show bgp [afi] [safi] summary established [json] +.. clicmd:: show bgp [afi] [safi] summary established [json] + + Show a bgp peer summary for peers that are successfully exchanging routes + for the specified address family, and subsequent address-family. + .. index:: show bgp [afi] [safi] neighbor [PEER] .. clicmd:: show bgp [afi] [safi] neighbor [PEER]
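The new `bgp default show-nexthop-hostname` toggle documented above is backed by a single bit in the BGP instance flags (`1 << 26` in the bgpd.h hunk) and is set, cleared and tested with FRR's flag helpers. A self-contained illustration of that bit-flag idiom, with local stand-in macros rather than the real headers:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the flag helpers used throughout the patch */
#define SET_FLAG(V, F)   ((V) |= (F))
#define UNSET_FLAG(V, F) ((V) &= ~(F))
#define CHECK_FLAG(V, F) ((V) & (F))

/* Example bit assignments; only the second mirrors the hunk above */
#define FLAG_SHOW_HOSTNAME         (1 << 7)
#define FLAG_SHOW_NEXTHOP_HOSTNAME (1 << 26)

int main(void)
{
	uint64_t flags = 0;

	SET_FLAG(flags, FLAG_SHOW_NEXTHOP_HOSTNAME);

	/* "!!" collapses the masked value to 0 or 1 so it can be compared
	 * against a saved boolean default when writing the config back out. */
	int enabled = !!CHECK_FLAG(flags, FLAG_SHOW_NEXTHOP_HOSTNAME);

	printf("%sbgp default show-nexthop-hostname\n", enabled ? "" : "no ");

	UNSET_FLAG(flags, FLAG_SHOW_NEXTHOP_HOSTNAME);
	printf("%d\n", !!CHECK_FLAG(flags, FLAG_SHOW_NEXTHOP_HOSTNAME)); /* 0 */
	return 0;
}

The double negation matters because CHECK_FLAG() returns the raw masked value, not a boolean, so comparing it directly against a saved 0/1 default would misbehave for any bit other than bit zero.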
diff --git a/doc/user/pim.rst b/doc/user/pim.rst index 8834d28abb..919dc51f69 100644 --- a/doc/user/pim.rst +++ b/doc/user/pim.rst @@ -166,10 +166,22 @@ Certain signals have special meanings to *pimd*. urib-only Lookup in the Unicast Rib only. -.. index:: no ip msdp mesh-group [WORD] -.. clicmd:: no ip msdp mesh-group [WORD] +.. index:: [no] ip msdp mesh-group [WORD] +.. clicmd:: [no] ip msdp mesh-group [WORD] - Delete multicast source discovery protocol mesh-group + Create or delete a multicast source discovery protocol mesh-group, using + [WORD] as the group name. + +.. index:: [no] ip msdp mesh-group WORD member A.B.C.D +.. clicmd:: [no] ip msdp mesh-group WORD member A.B.C.D + + Add or delete the member A.B.C.D in the specified MSDP mesh group WORD. + +.. index:: [no] ip msdp mesh-group WORD source A.B.C.D +.. clicmd:: [no] ip msdp mesh-group WORD source A.B.C.D + + Use the specified address A.B.C.D as the source address for + mesh group packets being sent. .. index:: ip igmp generate-query-once [version (2-3)] .. clicmd:: ip igmp generate-query-once [version (2-3)] diff --git a/doc/user/rpki.rst b/doc/user/rpki.rst index f8ec98c964..77cadb6ae7 100644 --- a/doc/user/rpki.rst +++ b/doc/user/rpki.rst @@ -60,8 +60,9 @@ Enabling RPKI This command enables the RPKI configuration mode. Most commands that start with *rpki* can only be used in this mode. - When it is used in a telnet session, leaving of this mode cause rpki to be - initialized. + This command is available either in *configure node* for the default *vrf* or + in *vrf node* for a specific *vrf*. When it is used in a telnet session, + leaving this mode causes rpki to be initialized. Executing this command alone does not activate prefix validation. You need to configure at least one reachable cache server. See section @@ -91,6 +92,9 @@ Examples of the error:: router(config)# rpki % [BGP] Unknown command: rpki + router(config-vrf)# rpki + % [BGP] Unknown command: rpki + Note that the RPKI commands will be available in vtysh when running ``find rpki`` regardless of whether the module is loaded. @@ -99,7 +103,14 @@ Note that the RPKI commands will be available in vtysh when running Configuring RPKI/RTR Cache Servers ---------------------------------- -The following commands are independent of a specific cache server. +RPKI/RTR can be configured independently, either in the configure node or in the *vrf* +sub context. If configured in the configure node, the core *bgp* instance of the default +*vrf* is impacted by the configuration. + +Each RPKI/RTR context is mapped to a *vrf* and can be made up of a specific list +of cache servers and specific settings. + +The following commands are available independently of a specific cache server. .. index:: rpki polling_period (1-3600) .. clicmd:: rpki polling_period (1-3600) @@ -200,27 +211,27 @@ Debugging Displaying RPKI --------------- -.. index:: show rpki prefix <A.B.C.D/M|X:X::X:X/M> [(1-4294967295)] -.. clicmd:: show rpki prefix <A.B.C.D/M|X:X::X:X/M> [(1-4294967295)] +.. index:: show rpki prefix <A.B.C.D/M|X:X::X:X/M> [(1-4294967295)] [vrf NAME] +.. clicmd:: show rpki prefix <A.B.C.D/M|X:X::X:X/M> [(1-4294967295)] [vrf NAME] Display validated prefixes received from the cache servers filtered by the specified prefix. -.. index:: show rpki as-number ASN -.. clicmd:: show rpki as-number ASN +.. index:: show rpki as-number ASN [vrf NAME] +.. clicmd:: show rpki as-number ASN [vrf NAME] Display validated prefixes received from the cache servers filtered by ASN. -.. index:: show rpki prefix-table -.. clicmd:: show rpki prefix-table +.. index:: show rpki prefix-table [vrf NAME] +.. clicmd:: show rpki prefix-table [vrf NAME] Display all validated prefix to origin AS mappings/records which have been received from the cache servers and stored in the router.
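The documentation above describes each RPKI/RTR context as tied to a single VRF, with its own cache-server list and timers. A deliberately simplified sketch of such a per-VRF container and its lookup by name follows; it is illustrative only, and the field names are assumptions rather than the actual struct rpki_vrf layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for a per-VRF RPKI context (illustrative only) */
struct rpki_ctx {
	char vrfname[36];              /* empty string = default VRF */
	unsigned int polling_period;
	unsigned int retry_interval;
	unsigned int expire_interval;
	struct rpki_ctx *next;         /* singly linked list of contexts */
};

/* NULL or an empty name selects the default-VRF context, mirroring how the
 * show commands above only pass a vrfname for non-default VRFs. */
static struct rpki_ctx *ctx_find(struct rpki_ctx *head, const char *vrfname)
{
	const char *key = vrfname ? vrfname : "";

	for (struct rpki_ctx *c = head; c; c = c->next)
		if (strcmp(c->vrfname, key) == 0)
			return c;
	return NULL;
}

int main(void)
{
	struct rpki_ctx blue = { "vrf_connect", 1000, 600, 7200, NULL };
	struct rpki_ctx dflt = { "", 3600, 600, 7200, &blue };

	struct rpki_ctx *c = ctx_find(&dflt, "vrf_connect");

	printf("polling period %u\n", c ? c->polling_period : 0);
	return 0;
}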
Based on this data, the router validates BGP Updates. -.. index:: show rpki cache-connection -.. clicmd:: show rpki cache-connection +.. index:: show rpki cache-connection [vrf NAME] +.. clicmd:: show rpki cache-connection [vrf NAME] Display all configured cache servers, whether active or not. @@ -271,5 +282,54 @@ RPKI Configuration Example route-map rpki permit 40 ! +RPKI Configuration Example with VRF +----------------------------------- + +.. code-block:: frr + + hostname bgpd1 + password zebra + ! log stdout + debug bgp updates + debug bgp keepalives + debug rpki + ! + vrf vrf_connect + rpki + rpki polling_period 1000 + rpki timeout 10 + ! SSH Example: + rpki cache example.com 22 rtr-ssh ./ssh_key/id_rsa ./ssh_key/id_rsa.pub preference 1 + ! TCP Example: + rpki cache rpki-validator.realmv6.org 8282 preference 2 + exit + ! + exit-vrf + router bgp 60001 vrf vrf_connect + bgp router-id 141.22.28.223 + network 192.168.0.0/16 + neighbor 123.123.123.0 remote-as 60002 + neighbor 123.123.123.0 route-map rpki in + ! + address-family ipv6 + neighbor 123.123.123.0 activate + neighbor 123.123.123.0 route-map rpki in + exit-address-family + ! + route-map rpki permit 10 + match rpki invalid + set local-preference 10 + ! + route-map rpki permit 20 + match rpki notfound + set local-preference 20 + ! + route-map rpki permit 30 + match rpki valid + set local-preference 30 + ! + route-map rpki permit 40 + ! + .. [Securing-BGP] Geoff Huston, Randy Bush: Securing BGP, In: The Internet Protocol Journal, Volume 14, No. 2, 2011. <http://www.cisco.com/web/about/ac123/ac147/archived_issues/ipj_14-2/142_bgp.html> .. [Resource-Certification] Geoff Huston: Resource Certification, In: The Internet Protocol Journal, Volume 12, No.1, 2009. <http://www.cisco.com/web/about/ac123/ac147/archived_issues/ipj_12-1/121_resource.html> diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c index 03d7b3d07b..e0e82e4725 100644 --- a/isisd/isis_circuit.c +++ b/isisd/isis_circuit.c @@ -721,7 +721,7 @@ void isis_circuit_down(struct isis_circuit *circuit) #endif /* ifndef FABRICD */ /* log adjacency changes if configured to do so */ - if (circuit->area && circuit->area->log_adj_changes) { + if (circuit->area->log_adj_changes) { struct isis_adjacency *adj = NULL; if (circuit->circ_type == CIRCUIT_T_P2P) { adj = circuit->u.p2p.neighbor; diff --git a/lib/command.c b/lib/command.c index 80b75d9b23..fc43cce189 100644 --- a/lib/command.c +++ b/lib/command.c @@ -841,6 +841,9 @@ enum node_type node_parent(enum node_type node) case BFD_PROFILE_NODE: ret = BFD_NODE; break; + case RPKI_VRF_NODE: + ret = VRF_NODE; + break; default: ret = CONFIG_NODE; break; diff --git a/lib/command.h b/lib/command.h index 21bb613540..9e0fc783c7 100644 --- a/lib/command.h +++ b/lib/command.h @@ -159,6 +159,7 @@ enum node_type { OPENFABRIC_NODE, /* OpenFabric router configuration node */ VRRP_NODE, /* VRRP node */ BMP_NODE, /* BMP config under router bgp */ + RPKI_VRF_NODE, /* RPKI node for VRF */ NODE_TYPE_MAX, /* maximum */ }; diff --git a/lib/frr_pthread.c b/lib/frr_pthread.c index e237934f81..da9594ed80 100644 --- a/lib/frr_pthread.c +++ b/lib/frr_pthread.c @@ -159,10 +159,20 @@ static void *frr_pthread_inner(void *arg) int frr_pthread_run(struct frr_pthread *fpt, const pthread_attr_t *attr) { int ret; + sigset_t oldsigs, blocksigs; + + /* Ensure we never handle signals on a background thread by blocking + * everything here (new thread inherits signal mask) + */ + sigfillset(&blocksigs); + pthread_sigmask(SIG_BLOCK, &blocksigs, &oldsigs); fpt->rcu_thread = 
rcu_thread_prepare(); ret = pthread_create(&fpt->thread, attr, frr_pthread_inner, fpt); + /* Restore caller's signals */ + pthread_sigmask(SIG_SETMASK, &oldsigs, NULL); + /* * Per pthread_create(3), the contents of fpt->thread are undefined if * pthread_create() did not succeed. Reset this value to zero. @@ -383,6 +383,17 @@ struct interface *if_lookup_by_name(const char *name, vrf_id_t vrf_id) return RB_FIND(if_name_head, &vrf->ifaces_by_name, &if_tmp); } +struct interface *if_lookup_by_name_vrf(const char *name, struct vrf *vrf) +{ + struct interface if_tmp; + + if (!name || strnlen(name, INTERFACE_NAMSIZ) == INTERFACE_NAMSIZ) + return NULL; + + strlcpy(if_tmp.name, name, sizeof(if_tmp.name)); + return RB_FIND(if_name_head, &vrf->ifaces_by_name, &if_tmp); +} + struct interface *if_lookup_by_name_all_vrf(const char *name) { struct vrf *vrf; @@ -522,7 +522,9 @@ extern struct interface *if_lookup_prefix(const struct prefix *prefix, size_t if_lookup_by_hwaddr(const uint8_t *hw_addr, size_t addrsz, struct interface ***result, vrf_id_t vrf_id); +struct vrf; extern struct interface *if_lookup_by_name_all_vrf(const char *ifname); +extern struct interface *if_lookup_by_name_vrf(const char *name, struct vrf *vrf); extern struct interface *if_lookup_by_name(const char *ifname, vrf_id_t vrf_id); extern struct interface *if_get_by_name(const char *ifname, vrf_id_t vrf_id); extern struct interface *if_get_by_ifindex(ifindex_t ifindex, vrf_id_t vrf_id, @@ -551,7 +553,6 @@ extern bool if_is_loopback_or_vrf(const struct interface *ifp); extern int if_is_broadcast(const struct interface *ifp); extern int if_is_pointopoint(const struct interface *ifp); extern int if_is_multicast(const struct interface *ifp); -struct vrf; extern void if_terminate(struct vrf *vrf); extern void if_dump_all(void); extern const char *if_flag_dump(unsigned long); diff --git a/lib/nexthop.c b/lib/nexthop.c index 0d239e091b..3496081d47 100644 --- a/lib/nexthop.c +++ b/lib/nexthop.c @@ -187,35 +187,41 @@ int nexthop_cmp(const struct nexthop *next1, const struct nexthop *next2) return ret; } -int nexthop_same_firsthop(struct nexthop *next1, struct nexthop *next2) +bool nexthop_same_firsthop(const struct nexthop *next1, + const struct nexthop *next2) { + /* Map the TYPE_IPx types to TYPE_IPx_IFINDEX */ int type1 = NEXTHOP_FIRSTHOPTYPE(next1->type); int type2 = NEXTHOP_FIRSTHOPTYPE(next2->type); if (type1 != type2) - return 0; + return false; + + if (next1->vrf_id != next2->vrf_id) + return false; + switch (type1) { case NEXTHOP_TYPE_IPV4_IFINDEX: if (!IPV4_ADDR_SAME(&next1->gate.ipv4, &next2->gate.ipv4)) - return 0; + return false; if (next1->ifindex != next2->ifindex) - return 0; + return false; break; case NEXTHOP_TYPE_IFINDEX: if (next1->ifindex != next2->ifindex) - return 0; + return false; break; case NEXTHOP_TYPE_IPV6_IFINDEX: if (!IPV6_ADDR_SAME(&next1->gate.ipv6, &next2->gate.ipv6)) - return 0; + return false; if (next1->ifindex != next2->ifindex) - return 0; + return false; break; default: /* do nothing */ break; } - return 1; + return true; } /* diff --git a/lib/nexthop.h b/lib/nexthop.h index 9b71262589..eda88efc08 100644 --- a/lib/nexthop.h +++ b/lib/nexthop.h @@ -208,7 +208,8 @@ extern int nexthop_g_addr_cmp(enum nexthop_types_t type, extern const char *nexthop_type_to_str(enum nexthop_types_t nh_type); extern bool nexthop_labels_match(const struct nexthop *nh1, const struct nexthop *nh2); -extern int nexthop_same_firsthop(struct nexthop *next1, struct nexthop *next2); +extern bool nexthop_same_firsthop(const struct 
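The frr_pthread_run() hunk above leans on the POSIX guarantee that a newly created thread inherits the signal mask of the thread calling pthread_create(): every signal is blocked first, the worker is spawned, and the original mask is then restored in the caller only. A stand-alone version of the same pattern:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void *worker(void *arg)
{
	/* Runs with all signals blocked, inherited from the creator. */
	(void)arg;
	return NULL;
}

int main(void)
{
	sigset_t blocksigs, oldsigs;
	pthread_t thread;

	/* Block every signal in the creating thread ... */
	sigfillset(&blocksigs);
	pthread_sigmask(SIG_BLOCK, &blocksigs, &oldsigs);

	/* ... so the new thread starts with that fully-blocked mask ... */
	if (pthread_create(&thread, NULL, worker, NULL) != 0) {
		perror("pthread_create");
		return 1;
	}

	/* ... and then restore the original mask in the caller only. */
	pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);

	pthread_join(thread, NULL);
	printf("worker ran with all signals blocked\n");
	return 0;
}

Compile with -pthread; the background thread never handles process signals because its inherited mask blocks them all, which is exactly the property the patch wants for FRR's helper threads.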
nexthop *next1, + const struct nexthop *next2); extern const char *nexthop2str(const struct nexthop *nexthop, char *str, int size); diff --git a/lib/routemap.c b/lib/routemap.c index 3b45133450..7749ea4cc7 100644 --- a/lib/routemap.c +++ b/lib/routemap.c @@ -47,18 +47,12 @@ DEFINE_QOBJ_TYPE(route_map) #define IPv4_PREFIX_LIST "ip address prefix-list" #define IPv6_PREFIX_LIST "ipv6 address prefix-list" -#define IPv4_MATCH_RULE "ip " -#define IPv6_MATCH_RULE "ipv6 " #define IS_RULE_IPv4_PREFIX_LIST(S) \ (strncmp(S, IPv4_PREFIX_LIST, strlen(IPv4_PREFIX_LIST)) == 0) #define IS_RULE_IPv6_PREFIX_LIST(S) \ (strncmp(S, IPv6_PREFIX_LIST, strlen(IPv6_PREFIX_LIST)) == 0) -#define IS_IPv4_RULE(S) \ - (strncmp(S, IPv4_MATCH_RULE, strlen(IPv4_MATCH_RULE)) == 0) -#define IS_IPv6_RULE(S) \ - (strncmp(S, IPv6_MATCH_RULE, strlen(IPv6_MATCH_RULE)) == 0) struct route_map_pentry_dep { struct prefix_list_entry *pentry; const char *plist_name; @@ -86,8 +80,6 @@ static void route_map_del_plist_entries(afi_t afi, struct route_map_index *index, const char *plist_name, struct prefix_list_entry *entry); -static bool route_map_is_ip_rule_present(struct route_map_index *index); -static bool route_map_is_ipv6_rule_present(struct route_map_index *index); static struct hash *route_map_get_dep_hash(route_map_event_t event); @@ -1370,26 +1362,6 @@ enum rmap_compile_rets route_map_add_match(struct route_map_index *index, } else if (IS_RULE_IPv6_PREFIX_LIST(match_name)) { route_map_pfx_tbl_update(RMAP_EVENT_PLIST_ADDED, index, AFI_IP6, match_arg); - } else { - /* If IPv4 match criteria has been added to the route-map - * index, check for IPv6 prefix-list match rule presence and - * remove this index from the trie node created for each of the - * prefix-entry within the prefix-list. If no IPv6 prefix-list - * match rule is present, remove this index from the IPv6 - * default route's trie node. - */ - if (IS_IPv4_RULE(match_name)) - route_map_del_plist_entries(AFI_IP6, index, NULL, NULL); - - /* If IPv6 match criteria has been added to the route-map - * index, check for IPv4 prefix-list match rule presence and - * remove this index from the trie node created for each of the - * prefix-entry within the prefix-list. If no IPv4 prefix-list - * match rule is present, remove this index from the IPv4 - * default route's trie node. - */ - else if (IS_IPv6_RULE(match_name)) - route_map_del_plist_entries(AFI_IP, index, NULL, NULL); } /* Execute event hook. */ @@ -1441,7 +1413,7 @@ enum rmap_compile_rets route_map_delete_match(struct route_map_index *index, route_map_rule_delete(&index->match_list, rule); /* If IPv4 or IPv6 prefix-list match criteria - * has been delete to the route-map index, update + * has been delete from the route-map index, update * the route-map's prefix table. */ if (IS_RULE_IPv4_PREFIX_LIST(match_name)) { @@ -1452,30 +1424,6 @@ enum rmap_compile_rets route_map_delete_match(struct route_map_index *index, route_map_pfx_tbl_update( RMAP_EVENT_PLIST_DELETED, index, AFI_IP6, match_arg); - } else { - /* If no more IPv4 match rules are present in - * this index, check for IPv6 prefix-list match - * rule presence and add this index to trie node - * created for each of the prefix-entry within - * the prefix-list. If no IPv6 prefix-list match - * rule is present, add this index to the IPv6 - * default route's trie node. 
- */ - if (!route_map_is_ip_rule_present(index)) - route_map_add_plist_entries( - AFI_IP6, index, NULL, NULL); - - /* If no more IPv6 match rules are present in - * this index, check for IPv4 prefix-list match - * rule presence and add this index to trie node - * created for each of the prefix-entry within - * the prefix-list. If no IPv6 prefix-list match - * rule is present, add this index to the IPv4 - * default route's trie node. - */ - if (!route_map_is_ipv6_rule_present(index)) - route_map_add_plist_entries( - AFI_IP, index, NULL, NULL); } return RMAP_COMPILE_SUCCESS; @@ -1921,33 +1869,34 @@ static void route_map_pfx_table_del(struct route_table *table, route_unlock_node(rn); } -/* This function checks for the presence of an IPv4 match rule - * in the given route-map index. +/* This function checks for the presence of an IPv4 prefix-list + * match rule in the given route-map index. */ -static bool route_map_is_ip_rule_present(struct route_map_index *index) +static bool route_map_is_ip_pfx_list_rule_present(struct route_map_index *index) { struct route_map_rule_list *match_list = NULL; struct route_map_rule *rule = NULL; match_list = &index->match_list; for (rule = match_list->head; rule; rule = rule->next) - if (IS_IPv4_RULE(rule->cmd->str)) + if (IS_RULE_IPv4_PREFIX_LIST(rule->cmd->str)) return true; return false; } -/* This function checks for the presence of an IPv6 match rule - * in the given route-map index. +/* This function checks for the presence of an IPv6 prefix-list + * match rule in the given route-map index. */ -static bool route_map_is_ipv6_rule_present(struct route_map_index *index) +static bool +route_map_is_ipv6_pfx_list_rule_present(struct route_map_index *index) { struct route_map_rule_list *match_list = NULL; struct route_map_rule *rule = NULL; match_list = &index->match_list; for (rule = match_list->head; rule; rule = rule->next) - if (IS_IPv6_RULE(rule->cmd->str)) + if (IS_RULE_IPv6_PREFIX_LIST(rule->cmd->str)) return true; return false; @@ -2118,7 +2067,7 @@ static void route_map_trie_update(afi_t afi, route_map_event_t event, { if (event == RMAP_EVENT_PLIST_ADDED) { if (afi == AFI_IP) { - if (!route_map_is_ipv6_rule_present(index)) { + if (!route_map_is_ipv6_pfx_list_rule_present(index)) { route_map_pfx_table_del_default(AFI_IP6, index); route_map_add_plist_entries(afi, index, plist_name, NULL); @@ -2127,7 +2076,7 @@ static void route_map_trie_update(afi_t afi, route_map_event_t event, NULL, NULL); } } else { - if (!route_map_is_ip_rule_present(index)) { + if (!route_map_is_ip_pfx_list_rule_present(index)) { route_map_pfx_table_del_default(AFI_IP, index); route_map_add_plist_entries(afi, index, plist_name, NULL); @@ -2141,22 +2090,36 @@ static void route_map_trie_update(afi_t afi, route_map_event_t event, route_map_del_plist_entries(afi, index, plist_name, NULL); - if (!route_map_is_ipv6_rule_present(index)) + /* If IPv6 prefix-list match rule is not present, + * add this index to the IPv4 default route's trie + * node. + * Also, add this index to the trie nodes created + * for each of the prefix-entries within the IPv6 + * prefix-list, if the IPv6 prefix-list match rule + * is present. Else, add this index to the IPv6 + * default route's trie node. 
+ */ + if (!route_map_is_ipv6_pfx_list_rule_present(index)) route_map_pfx_table_add_default(afi, index); - if (!route_map_is_ip_rule_present(index)) - route_map_add_plist_entries(AFI_IP6, index, - NULL, NULL); + route_map_add_plist_entries(AFI_IP6, index, NULL, NULL); } else { route_map_del_plist_entries(afi, index, plist_name, NULL); - if (!route_map_is_ip_rule_present(index)) + /* If IPv4 prefix-list match rule is not present, + * add this index to the IPv6 default route's trie + * node. + * Also, add this index to the trie nodes created + * for each of the prefix-entries within the IPv4 + * prefix-list, if the IPv4 prefix-list match rule + * is present. Else, add this index to the IPv4 + * default route's trie node. + */ + if (!route_map_is_ip_pfx_list_rule_present(index)) route_map_pfx_table_add_default(afi, index); - if (!route_map_is_ipv6_rule_present(index)) - route_map_add_plist_entries(AFI_IP, index, NULL, - NULL); + route_map_add_plist_entries(AFI_IP, index, NULL, NULL); } } } @@ -2229,30 +2192,27 @@ static void route_map_pentry_update(route_map_event_t event, } if (event == RMAP_EVENT_PLIST_ADDED) { - if (plist->count == 1) { - if (afi == AFI_IP) { - if (!route_map_is_ipv6_rule_present(index)) - route_map_add_plist_entries( - afi, index, plist_name, pentry); - } else { - if (!route_map_is_ip_rule_present(index)) - route_map_add_plist_entries( - afi, index, plist_name, pentry); - } + if (afi == AFI_IP) { + if (!route_map_is_ipv6_pfx_list_rule_present(index)) + route_map_add_plist_entries(afi, index, + plist_name, pentry); } else { - route_map_add_plist_entries(afi, index, plist_name, - pentry); + if (!route_map_is_ip_pfx_list_rule_present(index)) + route_map_add_plist_entries(afi, index, + plist_name, pentry); } } else if (event == RMAP_EVENT_PLIST_DELETED) { route_map_del_plist_entries(afi, index, plist_name, pentry); if (plist->count == 1) { if (afi == AFI_IP) { - if (!route_map_is_ipv6_rule_present(index)) + if (!route_map_is_ipv6_pfx_list_rule_present( + index)) route_map_pfx_table_add_default(afi, index); } else { - if (!route_map_is_ip_rule_present(index)) + if (!route_map_is_ip_pfx_list_rule_present( + index)) route_map_pfx_table_add_default(afi, index); } diff --git a/sharpd/sharp_zebra.c b/sharpd/sharp_zebra.c index c47f2105cb..7ab2d6ec22 100644 --- a/sharpd/sharp_zebra.c +++ b/sharpd/sharp_zebra.c @@ -435,6 +435,12 @@ static int sharp_debug_nexthops(struct zapi_route *api) int i; char buf[PREFIX_STRLEN]; + if (api->nexthop_num == 0) { + zlog_debug( + " Not installed"); + return 0; + } + for (i = 0; i < api->nexthop_num; i++) { struct zapi_nexthop *znh = &api->nexthops[i]; diff --git a/staticd/static_zebra.c b/staticd/static_zebra.c index 5cadf34365..c42f632ffb 100644 --- a/staticd/static_zebra.c +++ b/staticd/static_zebra.c @@ -363,6 +363,7 @@ extern void static_zebra_route_add(struct route_node *rn, memcpy(&api.src_prefix, src_pp, sizeof(api.src_prefix)); } SET_FLAG(api.flags, ZEBRA_FLAG_RR_USE_DISTANCE); + SET_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION); SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP); if (si_changed->distance) { SET_FLAG(api.message, ZAPI_MESSAGE_DISTANCE); diff --git a/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py b/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py index 087ba21e5e..948f641afb 100755 --- a/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py +++ b/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py @@ -63,7 +63,7 @@ from lib.common_config import ( reset_config_on_routers, ) from lib.topolog import logger -from lib.bgp 
import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify +from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology and configuration creation @@ -295,7 +295,7 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type): addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type][:int(ecmp_num)], + next_hop=NEXT_HOPS[addr_type][: int(ecmp_num)], protocol=protocol, ) assert result is True, "Testcase {} : Failed \n Error: {}".format( @@ -336,8 +336,12 @@ def test_ecmp_after_clear_bgp(request, test_type): tc_name, result ) - # Clear bgp - result = clear_bgp_and_verify(tgen, topo, dut) + # Clear BGP + for addr_type in ADDR_TYPES: + clear_bgp(tgen, addr_type, dut) + + # Verify BGP convergence + result = verify_bgp_convergence(tgen, topo) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: diff --git a/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py b/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py index 94409ff3e1..5b997fdd16 100755 --- a/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py +++ b/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py @@ -63,7 +63,7 @@ from lib.common_config import ( reset_config_on_routers, ) from lib.topolog import logger -from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify +from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp from lib.topojson import build_topo_from_json, build_config_from_json # Reading the data from JSON File for topology and configuration creation @@ -296,7 +296,7 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type): addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type][:int(ecmp_num)], + next_hop=NEXT_HOPS[addr_type][: int(ecmp_num)], protocol=protocol, ) assert result is True, "Testcase {} : Failed \n Error: {}".format( @@ -337,8 +337,12 @@ def test_ecmp_after_clear_bgp(request, test_type): tc_name, result ) - # Clear bgp - result = clear_bgp_and_verify(tgen, topo, dut) + # Clear BGP + for addr_type in ADDR_TYPES: + clear_bgp(tgen, addr_type, dut) + + # Verify BGP convergence + result = verify_bgp_convergence(tgen, topo) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: diff --git a/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py b/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py index b0ff3ac437..607b036c6a 100755 --- a/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py +++ b/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py @@ -56,7 +56,6 @@ import pdb import json import time import inspect -import ipaddress from time import sleep import pytest diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/__init__.py b/tests/topotests/bgp_show_ip_bgp_fqdn/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 --- a/tests/topotests/bgp_show_ip_bgp_fqdn/__init__.py +++ /dev/null diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/r1/bgpd.conf b/tests/topotests/bgp_show_ip_bgp_fqdn/r1/bgpd.conf deleted file mode 100644 index f0df56e947..0000000000 --- a/tests/topotests/bgp_show_ip_bgp_fqdn/r1/bgpd.conf +++ /dev/null @@ -1,5 +0,0 @@ -router bgp 65000 - no bgp ebgp-requires-policy - neighbor 192.168.255.2 remote-as 65001 - address-family ipv4 unicast - redistribute connected diff --git 
a/tests/topotests/bgp_show_ip_bgp_fqdn/r1/zebra.conf b/tests/topotests/bgp_show_ip_bgp_fqdn/r1/zebra.conf deleted file mode 100644 index 0a283c06d5..0000000000 --- a/tests/topotests/bgp_show_ip_bgp_fqdn/r1/zebra.conf +++ /dev/null @@ -1,9 +0,0 @@ -! -interface lo - ip address 172.16.255.254/32 -! -interface r1-eth0 - ip address 192.168.255.1/24 -! -ip forwarding -! diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/r2/bgpd.conf b/tests/topotests/bgp_show_ip_bgp_fqdn/r2/bgpd.conf deleted file mode 100644 index 422a7345f9..0000000000 --- a/tests/topotests/bgp_show_ip_bgp_fqdn/r2/bgpd.conf +++ /dev/null @@ -1,5 +0,0 @@ -router bgp 65001 - no bgp ebgp-requires-policy - bgp default show-hostname - neighbor 192.168.255.1 remote-as 65000 - neighbor 192.168.254.1 remote-as 65001 diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/r2/zebra.conf b/tests/topotests/bgp_show_ip_bgp_fqdn/r2/zebra.conf deleted file mode 100644 index e9e2e4391f..0000000000 --- a/tests/topotests/bgp_show_ip_bgp_fqdn/r2/zebra.conf +++ /dev/null @@ -1,12 +0,0 @@ -! -interface lo - ip address 172.16.255.253/32 -! -interface r2-eth0 - ip address 192.168.255.2/24 -! -interface r2-eth1 - ip address 192.168.254.2/24 -! -ip forwarding -! diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/r3/bgpd.conf b/tests/topotests/bgp_show_ip_bgp_fqdn/r3/bgpd.conf deleted file mode 100644 index 8fcf6a736d..0000000000 --- a/tests/topotests/bgp_show_ip_bgp_fqdn/r3/bgpd.conf +++ /dev/null @@ -1,3 +0,0 @@ -router bgp 65001 - bgp default show-hostname - neighbor 192.168.254.2 remote-as 65001 diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/r3/zebra.conf b/tests/topotests/bgp_show_ip_bgp_fqdn/r3/zebra.conf deleted file mode 100644 index a8b8bc38c5..0000000000 --- a/tests/topotests/bgp_show_ip_bgp_fqdn/r3/zebra.conf +++ /dev/null @@ -1,6 +0,0 @@ -! -interface r3-eth0 - ip address 192.168.254.1/24 -! -ip forwarding -! diff --git a/tests/topotests/bgp_show_ip_bgp_fqdn/test_bgp_show_ip_bgp_fqdn.py b/tests/topotests/bgp_show_ip_bgp_fqdn/test_bgp_show_ip_bgp_fqdn.py deleted file mode 100644 index e8ad180935..0000000000 --- a/tests/topotests/bgp_show_ip_bgp_fqdn/test_bgp_show_ip_bgp_fqdn.py +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/env python - -# -# test_bgp_show_ip_bgp_fqdn.py -# Part of NetDEF Topology Tests -# -# Copyright (c) 2019 by -# Donatas Abraitis <donatas.abraitis@gmail.com> -# -# Permission to use, copy, modify, and/or distribute this software -# for any purpose with or without fee is hereby granted, provided -# that the above copyright notice and this permission notice appear -# in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY -# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS -# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE -# OF THIS SOFTWARE. -# - -""" -test_bgp_show_ip_bgp_fqdn.py: -Test if FQND is visible in `show [ip] bgp` output if -`bgp default show-hostname` is toggled. - -Topology: -r1 <-- eBGP --> r2 <-- iBGP --> r3 - -1. Check if both hostname and ip are added to JSON output -for 172.16.255.254/32 on r2. -2. Check if only ip is added to JSON output for 172.16.255.254/32 on r3. 
-""" - -import os -import sys -import json -import time -import pytest -import functools - -CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, "../")) - -# pylint: disable=C0413 -from lib import topotest -from lib.topogen import Topogen, TopoRouter, get_topogen -from lib.topolog import logger -from mininet.topo import Topo - - -class TemplateTopo(Topo): - def build(self, *_args, **_opts): - tgen = get_topogen(self) - - for routern in range(1, 4): - tgen.add_router("r{}".format(routern)) - - switch = tgen.add_switch("s1") - switch.add_link(tgen.gears["r1"]) - switch.add_link(tgen.gears["r2"]) - - switch = tgen.add_switch("s2") - switch.add_link(tgen.gears["r2"]) - switch.add_link(tgen.gears["r3"]) - - -def setup_module(mod): - tgen = Topogen(TemplateTopo, mod.__name__) - tgen.start_topology() - - router_list = tgen.routers() - - for i, (rname, router) in enumerate(router_list.iteritems(), 1): - router.load_config( - TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) - ) - router.load_config( - TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) - ) - - tgen.start_router() - - -def teardown_module(mod): - tgen = get_topogen() - tgen.stop_topology() - - -def test_bgp_show_ip_bgp_hostname(): - tgen = get_topogen() - - if tgen.routers_have_failure(): - pytest.skip(tgen.errors) - - def _bgp_converge(router): - output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.254/32 json")) - expected = {"prefix": "172.16.255.254/32"} - return topotest.json_cmp(output, expected) - - def _bgp_show_nexthop_hostname_and_ip(router): - output = json.loads(router.vtysh_cmd("show ip bgp json")) - for nh in output["routes"]["172.16.255.254/32"][0]["nexthops"]: - if "hostname" in nh and "ip" in nh: - return True - return False - - def _bgp_show_nexthop_ip_only(router): - output = json.loads(router.vtysh_cmd("show ip bgp json")) - for nh in output["routes"]["172.16.255.254/32"][0]["nexthops"]: - if "ip" in nh and not "hostname" in nh: - return True - return False - - test_func = functools.partial(_bgp_converge, tgen.gears["r2"]) - success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - - test_func = functools.partial(_bgp_converge, tgen.gears["r3"]) - success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - - assert result is None, 'Failed bgp convergence in "{}"'.format(tgen.gears["r2"]) - assert _bgp_show_nexthop_hostname_and_ip(tgen.gears["r2"]) == True - - assert result is None, 'Failed bgp convergence in "{}"'.format(tgen.gears["r3"]) - assert _bgp_show_nexthop_ip_only(tgen.gears["r3"]) == True - - -if __name__ == "__main__": - args = ["-s"] + sys.argv[1:] - sys.exit(pytest.main(args)) diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py index 38e4e1fce5..971bbd0f3b 100644 --- a/tests/topotests/lib/bgp.py +++ b/tests/topotests/lib/bgp.py @@ -21,7 +21,7 @@ from copy import deepcopy from time import sleep import traceback -import ipaddr +import ipaddress import os import sys from lib import topotest @@ -44,7 +44,6 @@ from lib.common_config import ( LOGDIR = "/tmp/topotests/" TMPDIR = None - def create_router_bgp(tgen, topo, input_dict=None, build=False, load_config=True): """ API to configure bgp on router @@ -381,10 +380,10 @@ def __create_bgp_unicast_neighbor( del_action = advertise_network_dict.setdefault("delete", False) # Generating IPs for verification - prefix = str(ipaddr.IPNetwork(unicode(network[0])).prefixlen) + prefix = 
str(ipaddress.ip_network(unicode(network[0])).prefixlen) network_list = generate_ips(network, no_of_network) for ip in network_list: - ip = str(ipaddr.IPNetwork(unicode(ip)).network) + ip = str(ipaddress.ip_network(unicode(ip)).network_address) cmd = "network {}/{}".format(ip, prefix) if del_action: @@ -859,7 +858,7 @@ def verify_router_id(tgen, topo, input_dict): logger.info("Checking router %s router-id", router) show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True) router_id_out = show_bgp_json["ipv4Unicast"]["routerId"] - router_id_out = ipaddr.IPv4Address(unicode(router_id_out)) + router_id_out = ipaddress.IPv4Address(unicode(router_id_out)) # Once router-id is deleted, highest interface ip should become # router-id @@ -867,7 +866,7 @@ def verify_router_id(tgen, topo, input_dict): router_id = find_interface_with_greater_ip(topo, router) else: router_id = input_dict[router]["bgp"]["router_id"] - router_id = ipaddr.IPv4Address(unicode(router_id)) + router_id = ipaddress.IPv4Address(unicode(router_id)) if router_id == router_id_out: logger.info("Found expected router-id %s for router %s", router_id, router) @@ -882,7 +881,7 @@ def verify_router_id(tgen, topo, input_dict): return True -@retry(attempts=44, wait=3, return_is_str=True) +@retry(attempts=50, wait=3, return_is_str=True) def verify_bgp_convergence(tgen, topo, dut=None): """ API will verify if BGP is converged with in the given time frame. @@ -1052,11 +1051,13 @@ def verify_bgp_convergence(tgen, topo, dut=None): if nh_state == "Established": no_of_peer += 1 - if no_of_peer == total_peer: - logger.info("[DUT: %s] VRF: %s, BGP is Converged", router, vrf) - else: - errormsg = "[DUT: %s] VRF: %s, BGP is not converged" % (router, vrf) - return errormsg + if no_of_peer == total_peer: + logger.info("[DUT: %s] VRF: %s, BGP is Converged for %s address-family", + router, vrf, addr_type) + else: + errormsg = ("[DUT: %s] VRF: %s, BGP is not converged for %s address-family" % + (router, vrf, addr_type)) + return errormsg logger.debug("Exiting API: verify_bgp_convergence()") return True @@ -1326,7 +1327,7 @@ def verify_as_numbers(tgen, topo, input_dict): return True -@retry(attempts=44, wait=3, return_is_str=True) +@retry(attempts=50, wait=3, return_is_str=True) def verify_bgp_convergence_from_running_config(tgen, dut=None): """ API to verify BGP convergence b/w loopback and physical interface. @@ -1467,9 +1468,10 @@ def clear_bgp_and_verify(tgen, topo, router): rnode = tgen.routers()[router] peer_uptime_before_clear_bgp = {} + sleeptime = 3 + # Verifying BGP convergence before bgp clear command - for retry in range(44): - sleeptime = 3 + for retry in range(50): # Waiting for BGP to converge logger.info( "Waiting for %s sec for BGP to converge on router" " %s...", @@ -1536,8 +1538,8 @@ def clear_bgp_and_verify(tgen, topo, router): ) else: errormsg = ( - "TIMEOUT!! BGP is not converged in 30 seconds for" - " router {}".format(router) + "TIMEOUT!! BGP is not converged in {} seconds for" + " router {}".format(retry * sleeptime, router) ) return errormsg @@ -1551,8 +1553,8 @@ def clear_bgp_and_verify(tgen, topo, router): peer_uptime_after_clear_bgp = {} # Verifying BGP convergence after bgp clear command - for retry in range(44): - sleeptime = 3 + for retry in range(50): + # Waiting for BGP to converge logger.info( "Waiting for %s sec for BGP to converge on router" " %s...", @@ -1615,8 +1617,8 @@ def clear_bgp_and_verify(tgen, topo, router): ) else: errormsg = ( - "TIMEOUT!! 
BGP is not converged in 30 seconds for" - " router {}".format(router) + "TIMEOUT!! BGP is not converged in {} seconds for" + " router {}".format(retry * sleeptime, router) ) return errormsg @@ -2102,7 +2104,7 @@ def verify_best_path_as_per_bgp_attribute( routes = generate_ips(_network, no_of_ip) for route in routes: - route = str(ipaddr.IPNetwork(unicode(route))) + route = str(ipaddress.ip_network(unicode(route))) if route in sh_ip_bgp_json["routes"]: route_attributes = sh_ip_bgp_json["routes"][route] @@ -2411,7 +2413,7 @@ def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, aspath=None) ip_list = generate_ips(network, no_of_ip) for st_rt in ip_list: - st_rt = str(ipaddr.IPNetwork(unicode(st_rt))) + st_rt = str(ipaddress.ip_network(unicode(st_rt))) _addr_type = validate_ip_address(st_rt) if _addr_type != addr_type: @@ -2547,7 +2549,7 @@ def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, aspath=None) ip_list = generate_ips(network, no_of_network) for st_rt in ip_list: - st_rt = str(ipaddr.IPNetwork(unicode(st_rt))) + st_rt = str(ipaddress.ip_network(unicode(st_rt))) _addr_type = validate_ip_address(st_rt) if _addr_type != addr_type: diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py index 21ed47fc4b..d72d0aa223 100644 --- a/tests/topotests/lib/common_config.py +++ b/tests/topotests/lib/common_config.py @@ -36,7 +36,7 @@ import sys import ConfigParser import traceback import socket -import ipaddr +import ipaddress from lib.topolog import logger, logger_config from lib.topogen import TopoRouter, get_topogen @@ -690,6 +690,12 @@ def start_topology(tgen): router_list = tgen.routers() for rname in ROUTER_LIST: router = router_list[rname] + + # It will help in debugging the failures, will give more details on which + # specific kernel version tests are failing + linux_ver = router.run("uname -a") + logger.info("Logging platform related details: \n %s \n", linux_ver) + try: os.chdir(TMPDIR) @@ -1066,10 +1072,10 @@ def generate_ips(network, no_of_ips): addr_type = validate_ip_address(start_ip) if addr_type == "ipv4": - start_ip = ipaddr.IPv4Address(unicode(start_ip)) + start_ip = ipaddress.IPv4Address(unicode(start_ip)) step = 2 ** (32 - mask) if addr_type == "ipv6": - start_ip = ipaddr.IPv6Address(unicode(start_ip)) + start_ip = ipaddress.IPv6Address(unicode(start_ip)) step = 2 ** (128 - mask) next_ip = start_ip @@ -1077,7 +1083,7 @@ def generate_ips(network, no_of_ips): while count < no_of_ips: ipaddress_list.append("{}/{}".format(next_ip, mask)) if addr_type == "ipv6": - next_ip = ipaddr.IPv6Address(int(next_ip) + step) + next_ip = ipaddress.IPv6Address(int(next_ip) + step) else: next_ip += step count += 1 @@ -2273,7 +2279,7 @@ def verify_rib( nh_found = False for st_rt in ip_list: - st_rt = str(ipaddr.IPNetwork(unicode(st_rt))) + st_rt = str(ipaddress.ip_network(unicode(st_rt))) _addr_type = validate_ip_address(st_rt) if _addr_type != addr_type: @@ -2469,7 +2475,7 @@ def verify_rib( nh_found = False for st_rt in ip_list: - st_rt = str(ipaddr.IPNetwork(unicode(st_rt))) + st_rt = str(ipaddress.ip_network(unicode(st_rt))) _addr_type = validate_ip_address(st_rt) if _addr_type != addr_type: diff --git a/tests/topotests/lib/topojson.py b/tests/topotests/lib/topojson.py index 24b61981d6..9c2baedde4 100644 --- a/tests/topotests/lib/topojson.py +++ b/tests/topotests/lib/topojson.py @@ -21,7 +21,7 @@ from collections import OrderedDict from json import dumps as json_dumps from re import search as re_search -import ipaddr +import 
ipaddress import pytest # Import topogen and topotest helpers @@ -65,12 +65,12 @@ def build_topo_from_json(tgen, topo): listRouters.append(routerN) if "ipv4base" in topo: - ipv4Next = ipaddr.IPv4Address(topo["link_ip_start"]["ipv4"]) + ipv4Next = ipaddress.IPv4Address(topo["link_ip_start"]["ipv4"]) ipv4Step = 2 ** (32 - topo["link_ip_start"]["v4mask"]) if topo["link_ip_start"]["v4mask"] < 32: ipv4Next += 1 if "ipv6base" in topo: - ipv6Next = ipaddr.IPv6Address(topo["link_ip_start"]["ipv6"]) + ipv6Next = ipaddress.IPv6Address(topo["link_ip_start"]["ipv6"]) ipv6Step = 2 ** (128 - topo["link_ip_start"]["v6mask"]) if topo["link_ip_start"]["v6mask"] < 127: ipv6Next += 1 @@ -181,7 +181,7 @@ def build_topo_from_json(tgen, topo): destRouter_link_json["ipv6"] = "{}/{}".format( ipv6Next + 1, topo["link_ip_start"]["v6mask"] ) - ipv6Next = ipaddr.IPv6Address(int(ipv6Next) + ipv6Step) + ipv6Next = ipaddress.IPv6Address(int(ipv6Next) + ipv6Step) logger.debug( "Generated link data for router: %s\n%s", diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py index 22ed4b4d0f..bffb8208e7 100644 --- a/tests/topotests/lib/topotest.py +++ b/tests/topotests/lib/topotest.py @@ -1180,9 +1180,11 @@ class Router(Node): def startRouterDaemons(self, daemons=None): "Starts all FRR daemons for this router." - bundle_data = subprocess.check_output( - ["cat /etc/frr/support_bundle_commands.conf"], shell=True - ) + bundle_data = '' + + if os.path.exists('/etc/frr/support_bundle_commands.conf'): + bundle_data = subprocess.check_output( + ["cat /etc/frr/support_bundle_commands.conf"], shell=True) self.cmd( "echo '{}' > /etc/frr/support_bundle_commands.conf".format(bundle_data) ) diff --git a/tests/topotests/zebra_rib/r1/v4_route_1_static_override.json b/tests/topotests/zebra_rib/r1/v4_route_1_static_override.json index aa9522aff6..22e199f9aa 100644 --- a/tests/topotests/zebra_rib/r1/v4_route_1_static_override.json +++ b/tests/topotests/zebra_rib/r1/v4_route_1_static_override.json @@ -10,7 +10,7 @@ "installed":true, "table":254, "internalStatus":16, - "internalFlags":72, + "internalFlags":73, "internalNextHopNum":1, "internalNextHopActiveNum":1, "nexthops":[ diff --git a/tools/frr-reload.py b/tools/frr-reload.py index 200279b125..9e86cf2156 100755 --- a/tools/frr-reload.py +++ b/tools/frr-reload.py @@ -588,6 +588,7 @@ end line.startswith("vnc defaults") or line.startswith("vnc l2-group") or line.startswith("vnc nve-group") or + line.startswith("peer") or line.startswith("member pseudowire")): main_ctx_key = [] diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c index 29e0842daf..4bf62d130e 100644 --- a/vtysh/vtysh.c +++ b/vtysh/vtysh.c @@ -809,6 +809,9 @@ int vtysh_mark_file(const char *filename) } else if ((prev_node == KEYCHAIN_KEY_NODE) && (tried == 1)) { vty_out(vty, "exit\n"); + } else if ((prev_node == BFD_PEER_NODE) + && (tried == 1)) { + vty_out(vty, "exit\n"); } else if (tried) { vty_out(vty, "end\n"); } @@ -1478,6 +1481,13 @@ static struct cmd_node rpki_node = { .prompt = "%s(config-rpki)# ", }; +static struct cmd_node rpki_vrf_node = { + .name = "rpki", + .node = RPKI_VRF_NODE, + .parent_node = VRF_NODE, + .prompt = "%s(config-vrf-rpki)# ", +}; + #if HAVE_BFDD > 0 static struct cmd_node bfd_node = { .name = "bfd", @@ -1660,12 +1670,25 @@ DEFUNSH(VTYSH_BGPD, address_family_ipv6_labeled_unicast, } DEFUNSH(VTYSH_BGPD, + no_rpki, + no_rpki_cmd, + "no rpki", + NO_STR + "rpki\n") +{ + return CMD_SUCCESS; +} + +DEFUNSH(VTYSH_BGPD, rpki, rpki_cmd, "rpki", "Enable rpki and enter rpki configuration mode\n") { 
- vty->node = RPKI_NODE; + if (vty->node == CONFIG_NODE) + vty->node = RPKI_NODE; + else + vty->node = RPKI_VRF_NODE; return CMD_SUCCESS; } @@ -3815,6 +3838,7 @@ void vtysh_init_vty(void) install_node(&vty_node); install_node(&rpki_node); install_node(&bmp_node); + install_node(&rpki_vrf_node); #if HAVE_BFDD > 0 install_node(&bfd_node); install_node(&bfd_peer_node); @@ -4059,9 +4083,15 @@ void vtysh_init_vty(void) install_element(BMP_NODE, &vtysh_end_all_cmd); install_element(CONFIG_NODE, &rpki_cmd); + install_element(CONFIG_NODE, &no_rpki_cmd); + install_element(VRF_NODE, &rpki_cmd); + install_element(VRF_NODE, &no_rpki_cmd); install_element(RPKI_NODE, &rpki_exit_cmd); install_element(RPKI_NODE, &rpki_quit_cmd); install_element(RPKI_NODE, &vtysh_end_all_cmd); + install_element(RPKI_VRF_NODE, &rpki_exit_cmd); + install_element(RPKI_VRF_NODE, &rpki_quit_cmd); + install_element(RPKI_VRF_NODE, &vtysh_end_all_cmd); /* EVPN commands */ install_element(BGP_EVPN_NODE, &bgp_evpn_vni_cmd); diff --git a/vtysh/vtysh.h b/vtysh/vtysh.h index d0edbb2710..97ae8b4890 100644 --- a/vtysh/vtysh.h +++ b/vtysh/vtysh.h @@ -54,7 +54,7 @@ DECLARE_MGROUP(MVTYSH) #define VTYSH_ALL VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_LDPD|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD|VTYSH_NHRPD|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_SHARPD|VTYSH_PBRD|VTYSH_STATICD|VTYSH_BFDD|VTYSH_FABRICD|VTYSH_VRRPD #define VTYSH_RMAP VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD|VTYSH_EIGRPD|VTYSH_SHARPD|VTYSH_FABRICD #define VTYSH_INTERFACE VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_ISISD|VTYSH_PIMD|VTYSH_NHRPD|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_PBRD|VTYSH_FABRICD|VTYSH_VRRPD -#define VTYSH_VRF VTYSH_ZEBRA|VTYSH_PIMD|VTYSH_STATICD +#define VTYSH_VRF VTYSH_ZEBRA|VTYSH_PIMD|VTYSH_STATICD|VTYSH_BGPD #define VTYSH_KEYS VTYSH_RIPD|VTYSH_EIGRPD /* Daemons who can process nexthop-group configs */ #define VTYSH_NH_GROUP VTYSH_PBRD|VTYSH_SHARPD diff --git a/vtysh/vtysh_config.c b/vtysh/vtysh_config.c index 61bcf3b658..ab0c2b65c3 100644 --- a/vtysh/vtysh_config.c +++ b/vtysh/vtysh_config.c @@ -265,13 +265,22 @@ void vtysh_config_parse_line(void *arg, const char *line) config_add_line(config->line, line); } else if (!strncmp(line, " ip mroute", strlen(" ip mroute"))) { config_add_line_uniq_end(config->line, line); + } else if ((strncmp(line, " rpki", strlen(" rpki")) == 0) + && config->index == VRF_NODE) { + config_add_line(config->line, line); + config->index = RPKI_VRF_NODE; } else if (config->index == RMAP_NODE || config->index == INTERFACE_NODE || config->index == VTY_NODE || config->index == VRF_NODE || config->index == NH_GROUP_NODE) config_add_line_uniq(config->line, line); - else + else if (config->index == RPKI_VRF_NODE + && strncmp(line, " exit", + strlen(" exit")) == 0) { + config_add_line(config->line, line); + config->index = VRF_NODE; + } else config_add_line(config->line, line); } else config_add_line(config_top, line); @@ -403,6 +412,8 @@ void vtysh_config_parse_line(void *arg, const char *line) config = config_get(MPLS_NODE, line); else if (strncmp(line, "bfd", strlen("bfd")) == 0) config = config_get(BFD_NODE, line); + else if (strncmp(line, "rpki", strlen("rpki")) == 0) + config = config_get(RPKI_NODE, line); else { if (strncmp(line, "log", strlen("log")) == 0 || strncmp(line, "hostname", strlen("hostname")) diff --git a/zebra/interface.c b/zebra/interface.c index 9d1f70609b..982a63a022 100644 --- a/zebra/interface.c +++ b/zebra/interface.c @@ -1709,7 
+1709,7 @@ struct cmd_node interface_node = { #endif /* Show all interfaces to vty. */ DEFPY(show_interface, show_interface_cmd, - "show interface [vrf NAME$vrf_name] [brief$brief]", + "show interface vrf NAME$vrf_name [brief$brief]", SHOW_STR "Interface status and configuration\n" VRF_CMD_HELP_STR @@ -1717,15 +1717,15 @@ DEFPY(show_interface, show_interface_cmd, { struct vrf *vrf; struct interface *ifp; - vrf_id_t vrf_id = VRF_DEFAULT; interface_update_stats(); - if (vrf_name) - VRF_GET_ID(vrf_id, vrf_name, false); + vrf = vrf_lookup_by_name(vrf_name); + if (!vrf) { + vty_out(vty, "%% VRF %s not found\n", vrf_name); + return CMD_WARNING; + } - /* All interface print. */ - vrf = vrf_lookup_by_id(vrf_id); if (brief) { ifs_dump_brief_vty(vty, vrf); } else { @@ -1741,7 +1741,7 @@ DEFPY(show_interface, show_interface_cmd, /* Show all interfaces to vty. */ DEFPY (show_interface_vrf_all, show_interface_vrf_all_cmd, - "show interface vrf all [brief$brief]", + "show interface [vrf all] [brief$brief]", SHOW_STR "Interface status and configuration\n" VRF_ALL_CMD_HELP_STR @@ -1778,14 +1778,17 @@ DEFUN (show_interface_name_vrf, int idx_ifname = 2; int idx_name = 4; struct interface *ifp; - vrf_id_t vrf_id; + struct vrf *vrf; interface_update_stats(); - VRF_GET_ID(vrf_id, argv[idx_name]->arg, false); + vrf = vrf_lookup_by_name(argv[idx_name]->arg); + if (!vrf) { + vty_out(vty, "%% VRF %s not found\n", argv[idx_name]->arg); + return CMD_WARNING; + } - /* Specified interface print. */ - ifp = if_lookup_by_name(argv[idx_ifname]->arg, vrf_id); + ifp = if_lookup_by_name_vrf(argv[idx_ifname]->arg, vrf); if (ifp == NULL) { vty_out(vty, "%% Can't find interface %s\n", argv[idx_ifname]->arg); @@ -1806,35 +1809,23 @@ DEFUN (show_interface_name_vrf_all, VRF_ALL_CMD_HELP_STR) { int idx_ifname = 2; - struct vrf *vrf; struct interface *ifp; - int found = 0; interface_update_stats(); - /* All interface print. */ - RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { - /* Specified interface print. 
*/ - ifp = if_lookup_by_name(argv[idx_ifname]->arg, vrf->vrf_id); - if (ifp) { - if_dump_vty(vty, ifp); - found++; - } - } - - if (!found) { + ifp = if_lookup_by_name_all_vrf(argv[idx_ifname]->arg); + if (ifp == NULL) { vty_out(vty, "%% Can't find interface %s\n", argv[idx_ifname]->arg); return CMD_WARNING; } + if_dump_vty(vty, ifp); return CMD_SUCCESS; } - -static void if_show_description(struct vty *vty, vrf_id_t vrf_id) +static void if_show_description(struct vty *vty, struct vrf *vrf) { - struct vrf *vrf = vrf_lookup_by_id(vrf_id); struct interface *ifp; vty_out(vty, "Interface Status Protocol Description\n"); @@ -1882,18 +1873,21 @@ static void if_show_description(struct vty *vty, vrf_id_t vrf_id) DEFUN (show_interface_desc, show_interface_desc_cmd, - "show interface description [vrf NAME]", + "show interface description vrf NAME", SHOW_STR "Interface status and configuration\n" "Interface description\n" VRF_CMD_HELP_STR) { - vrf_id_t vrf_id = VRF_DEFAULT; + struct vrf *vrf; - if (argc > 3) - VRF_GET_ID(vrf_id, argv[4]->arg, false); + vrf = vrf_lookup_by_name(argv[4]->arg); + if (!vrf) { + vty_out(vty, "%% VRF %s not found\n", argv[4]->arg); + return CMD_WARNING; + } - if_show_description(vty, vrf_id); + if_show_description(vty, vrf); return CMD_SUCCESS; } @@ -1901,7 +1895,7 @@ DEFUN (show_interface_desc, DEFUN (show_interface_desc_vrf_all, show_interface_desc_vrf_all_cmd, - "show interface description vrf all", + "show interface description [vrf all]", SHOW_STR "Interface status and configuration\n" "Interface description\n" @@ -1913,7 +1907,7 @@ DEFUN (show_interface_desc_vrf_all, if (!RB_EMPTY(if_name_head, &vrf->ifaces_by_name)) { vty_out(vty, "\n\tVRF %s(%u)\n\n", VRF_LOGNAME(vrf), vrf->vrf_id); - if_show_description(vty, vrf->vrf_id); + if_show_description(vty, vrf); } return CMD_SUCCESS; diff --git a/zebra/kernel_netlink.c b/zebra/kernel_netlink.c index 75a8e9f17d..a4d22c12a4 100644 --- a/zebra/kernel_netlink.c +++ b/zebra/kernel_netlink.c @@ -698,6 +698,202 @@ static void netlink_parse_extended_ack(struct nlmsghdr *h) } /* + * netlink_send_msg - send a netlink message of a certain size. + * + * Returns -1 on error. Otherwise, it returns the number of bytes sent. + */ +static ssize_t netlink_send_msg(const struct nlsock *nl, void *buf, + size_t buflen) +{ + struct sockaddr_nl snl = {}; + struct iovec iov = {}; + struct msghdr msg = {}; + ssize_t status; + int save_errno = 0; + + iov.iov_base = buf; + iov.iov_len = buflen; + msg.msg_name = &snl; + msg.msg_namelen = sizeof(snl); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + snl.nl_family = AF_NETLINK; + + /* Send message to netlink interface. */ + frr_with_privs(&zserv_privs) { + status = sendmsg(nl->sock, &msg, 0); + save_errno = errno; + } + + if (IS_ZEBRA_DEBUG_KERNEL_MSGDUMP_SEND) { + zlog_debug("%s: >> netlink message dump [sent]", __func__); + zlog_hexdump(buf, buflen); + } + + if (status == -1) { + flog_err_sys(EC_LIB_SOCKET, "%s error: %s", __func__, + safe_strerror(save_errno)); + return -1; + } + + return status; +} + +/* + * netlink_recv_msg - receive a netlink message. + * + * Returns -1 on error, 0 if read would block or the number of bytes received. 
+ */ +static int netlink_recv_msg(const struct nlsock *nl, struct msghdr msg, + void *buf, size_t buflen) +{ + struct iovec iov; + int status; + + iov.iov_base = buf; + iov.iov_len = buflen; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + do { +#if defined(HANDLE_NETLINK_FUZZING) + /* Check if reading and filename is set */ + if (netlink_read && '\0' != netlink_fuzz_file[0]) { + zlog_debug("Reading netlink fuzz file"); + status = netlink_read_file(buf, netlink_fuzz_file); + ((struct sockaddr_nl *)msg.msg_name)->nl_pid = 0; + } else { + status = recvmsg(nl->sock, &msg, 0); + } +#else + status = recvmsg(nl->sock, &msg, 0); +#endif /* HANDLE_NETLINK_FUZZING */ + } while (status == -1 && errno == EINTR); + + if (status == -1) { + if (errno == EWOULDBLOCK || errno == EAGAIN) + return 0; + flog_err(EC_ZEBRA_RECVMSG_OVERRUN, "%s recvmsg overrun: %s", + nl->name, safe_strerror(errno)); + /* + * In this case we are screwed. There is no good way to recover + * zebra at this point. + */ + exit(-1); + } + + if (status == 0) { + flog_err_sys(EC_LIB_SOCKET, "%s EOF", nl->name); + return -1; + } + + if (msg.msg_namelen != sizeof(struct sockaddr_nl)) { + flog_err(EC_ZEBRA_NETLINK_LENGTH_ERROR, + "%s sender address length error: length %d", nl->name, + msg.msg_namelen); + return -1; + } + + if (IS_ZEBRA_DEBUG_KERNEL_MSGDUMP_RECV) { + zlog_debug("%s: << netlink message dump [recv]", __func__); + zlog_hexdump(buf, status); + } + +#if defined(HANDLE_NETLINK_FUZZING) + if (!netlink_read) { + zlog_debug("Writing incoming netlink message"); + netlink_write_incoming(buf, status, netlink_file_counter++); + } +#endif /* HANDLE_NETLINK_FUZZING */ + + return status; +} + +/* + * netlink_parse_error - parse a netlink error message + * + * Returns 1 if this message is acknowledgement, 0 if this error should be + * ignored, -1 otherwise. + */ +static int netlink_parse_error(const struct nlsock *nl, struct nlmsghdr *h, + const struct zebra_dplane_info *zns, + bool startup) +{ + struct nlmsgerr *err = (struct nlmsgerr *)NLMSG_DATA(h); + int errnum = err->error; + int msg_type = err->msg.nlmsg_type; + + if (h->nlmsg_len < NLMSG_LENGTH(sizeof(struct nlmsgerr))) { + flog_err(EC_ZEBRA_NETLINK_LENGTH_ERROR, + "%s error: message truncated", nl->name); + return -1; + } + + /* + * Parse the extended information before we actually handle it. At this + * point in time we do not do anything other than report the issue. + */ + if (h->nlmsg_flags & NLM_F_ACK_TLVS) + netlink_parse_extended_ack(h); + + /* If the error field is zero, then this is an ACK. */ + if (err->error == 0) { + if (IS_ZEBRA_DEBUG_KERNEL) { + zlog_debug("%s: %s ACK: type=%s(%u), seq=%u, pid=%u", + __func__, nl->name, + nl_msg_type_to_str(err->msg.nlmsg_type), + err->msg.nlmsg_type, err->msg.nlmsg_seq, + err->msg.nlmsg_pid); + } + + return 1; + } + + /* Deal with errors that occur because of races in link handling. */ + if (zns->is_cmd + && ((msg_type == RTM_DELROUTE + && (-errnum == ENODEV || -errnum == ESRCH)) + || (msg_type == RTM_NEWROUTE + && (-errnum == ENETDOWN || -errnum == EEXIST)))) { + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug("%s: error: %s type=%s(%u), seq=%u, pid=%u", + nl->name, safe_strerror(-errnum), + nl_msg_type_to_str(msg_type), msg_type, + err->msg.nlmsg_seq, err->msg.nlmsg_pid); + return 0; + } + + /* + * We see RTM_DELNEIGH when shutting down an interface with an IPv4 + * link-local. The kernel should have already deleted the neighbor so + * do not log these as an error. 
+ */ + if (msg_type == RTM_DELNEIGH + || (zns->is_cmd && msg_type == RTM_NEWROUTE + && (-errnum == ESRCH || -errnum == ENETUNREACH))) { + /* + * This is known to happen in some situations, don't log as + * error. + */ + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug("%s error: %s, type=%s(%u), seq=%u, pid=%u", + nl->name, safe_strerror(-errnum), + nl_msg_type_to_str(msg_type), msg_type, + err->msg.nlmsg_seq, err->msg.nlmsg_pid); + } else { + if ((msg_type != RTM_GETNEXTHOP) || !startup) + flog_err(EC_ZEBRA_UNEXPECTED_MESSAGE, + "%s error: %s, type=%s(%u), seq=%u, pid=%u", + nl->name, safe_strerror(-errnum), + nl_msg_type_to_str(msg_type), msg_type, + err->msg.nlmsg_seq, err->msg.nlmsg_pid); + } + + return -1; +} + +/* * netlink_parse_info * * Receive message from netlink interface and pass those information @@ -722,71 +918,19 @@ int netlink_parse_info(int (*filter)(struct nlmsghdr *, ns_id_t, int), while (1) { char buf[NL_RCV_PKT_BUF_SIZE]; - struct iovec iov = {.iov_base = buf, .iov_len = sizeof(buf)}; struct sockaddr_nl snl; struct msghdr msg = {.msg_name = (void *)&snl, - .msg_namelen = sizeof(snl), - .msg_iov = &iov, - .msg_iovlen = 1}; + .msg_namelen = sizeof(snl)}; struct nlmsghdr *h; if (count && read_in >= count) return 0; -#if defined(HANDLE_NETLINK_FUZZING) - /* Check if reading and filename is set */ - if (netlink_read && '\0' != netlink_fuzz_file[0]) { - zlog_debug("Reading netlink fuzz file"); - status = netlink_read_file(buf, netlink_fuzz_file); - snl.nl_pid = 0; - } else { - status = recvmsg(nl->sock, &msg, 0); - } -#else - status = recvmsg(nl->sock, &msg, 0); -#endif /* HANDLE_NETLINK_FUZZING */ - if (status < 0) { - if (errno == EINTR) - continue; - if (errno == EWOULDBLOCK || errno == EAGAIN) - break; - flog_err(EC_ZEBRA_RECVMSG_OVERRUN, - "%s recvmsg overrun: %s", nl->name, - safe_strerror(errno)); - /* - * In this case we are screwed. - * There is no good way to - * recover zebra at this point. - */ - exit(-1); - continue; - } - - if (status == 0) { - flog_err_sys(EC_LIB_SOCKET, "%s EOF", nl->name); - return -1; - } - - if (msg.msg_namelen != sizeof(snl)) { - flog_err(EC_ZEBRA_NETLINK_LENGTH_ERROR, - "%s sender address length error: length %d", - nl->name, msg.msg_namelen); + status = netlink_recv_msg(nl, msg, buf, sizeof(buf)); + if (status == -1) return -1; - } - - if (IS_ZEBRA_DEBUG_KERNEL_MSGDUMP_RECV) { - zlog_debug("%s: << netlink message dump [recv]", - __func__); - zlog_hexdump(buf, status); - } - -#if defined(HANDLE_NETLINK_FUZZING) - if (!netlink_read) { - zlog_debug("Writing incoming netlink message"); - netlink_write_incoming(buf, status, - netlink_file_counter++); - } -#endif /* HANDLE_NETLINK_FUZZING */ + else if (status == 0) + break; read_in++; for (h = (struct nlmsghdr *)buf; @@ -798,112 +942,14 @@ int netlink_parse_info(int (*filter)(struct nlmsghdr *, ns_id_t, int), /* Error handling. */ if (h->nlmsg_type == NLMSG_ERROR) { - struct nlmsgerr *err = - (struct nlmsgerr *)NLMSG_DATA(h); - int errnum = err->error; - int msg_type = err->msg.nlmsg_type; - - if (h->nlmsg_len - < NLMSG_LENGTH(sizeof(struct nlmsgerr))) { - flog_err(EC_ZEBRA_NETLINK_LENGTH_ERROR, - "%s error: message truncated", - nl->name); - return -1; - } - - /* - * Parse the extended information before - * we actually handle it. - * At this point in time we do not - * do anything other than report the - * issue. 
- */ - if (h->nlmsg_flags & NLM_F_ACK_TLVS) - netlink_parse_extended_ack(h); - - /* If the error field is zero, then this is an - * ACK */ - if (err->error == 0) { - if (IS_ZEBRA_DEBUG_KERNEL) { - zlog_debug( - "%s: %s ACK: type=%s(%u), seq=%u, pid=%u", - __func__, nl->name, - nl_msg_type_to_str( - err->msg.nlmsg_type), - err->msg.nlmsg_type, - err->msg.nlmsg_seq, - err->msg.nlmsg_pid); - } - - /* return if not a multipart message, - * otherwise continue */ + int err = netlink_parse_error(nl, h, zns, + startup); + if (err == 1) { if (!(h->nlmsg_flags & NLM_F_MULTI)) return 0; continue; - } - - /* Deal with errors that occur because of races - * in link handling */ - if (zns->is_cmd - && ((msg_type == RTM_DELROUTE - && (-errnum == ENODEV - || -errnum == ESRCH)) - || (msg_type == RTM_NEWROUTE - && (-errnum == ENETDOWN - || -errnum == EEXIST)))) { - if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug( - "%s: error: %s type=%s(%u), seq=%u, pid=%u", - nl->name, - safe_strerror(-errnum), - nl_msg_type_to_str( - msg_type), - msg_type, - err->msg.nlmsg_seq, - err->msg.nlmsg_pid); - return 0; - } - - /* We see RTM_DELNEIGH when shutting down an - * interface with an IPv4 - * link-local. The kernel should have already - * deleted the neighbor - * so do not log these as an error. - */ - if (msg_type == RTM_DELNEIGH - || (zns->is_cmd && msg_type == RTM_NEWROUTE - && (-errnum == ESRCH - || -errnum == ENETUNREACH))) { - /* This is known to happen in some - * situations, don't log - * as error. - */ - if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug( - "%s error: %s, type=%s(%u), seq=%u, pid=%u", - nl->name, - safe_strerror(-errnum), - nl_msg_type_to_str( - msg_type), - msg_type, - err->msg.nlmsg_seq, - err->msg.nlmsg_pid); - } else { - if ((msg_type != RTM_GETNEXTHOP) - || !startup) - flog_err( - EC_ZEBRA_UNEXPECTED_MESSAGE, - "%s error: %s, type=%s(%u), seq=%u, pid=%u", - nl->name, - safe_strerror(-errnum), - nl_msg_type_to_str( - msg_type), - msg_type, - err->msg.nlmsg_seq, - err->msg.nlmsg_pid); - } - - return -1; + } else + return err; } /* OK we got netlink message. */ @@ -966,26 +1012,8 @@ int netlink_talk_info(int (*filter)(struct nlmsghdr *, ns_id_t, int startup), struct nlmsghdr *n, const struct zebra_dplane_info *dp_info, int startup) { - int status = 0; - struct sockaddr_nl snl; - struct iovec iov; - struct msghdr msg; - int save_errno = 0; const struct nlsock *nl; - memset(&snl, 0, sizeof(snl)); - memset(&iov, 0, sizeof(iov)); - memset(&msg, 0, sizeof(msg)); - - iov.iov_base = n; - iov.iov_len = n->nlmsg_len; - msg.msg_name = (void *)&snl; - msg.msg_namelen = sizeof(snl); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - snl.nl_family = AF_NETLINK; - nl = &(dp_info->nls); n->nlmsg_seq = nl->seq; n->nlmsg_pid = nl->snl.nl_pid; @@ -997,22 +1025,8 @@ int netlink_talk_info(int (*filter)(struct nlmsghdr *, ns_id_t, int startup), n->nlmsg_type, n->nlmsg_len, n->nlmsg_seq, n->nlmsg_flags); - /* Send message to netlink interface. */ - frr_with_privs(&zserv_privs) { - status = sendmsg(nl->sock, &msg, 0); - save_errno = errno; - } - - if (IS_ZEBRA_DEBUG_KERNEL_MSGDUMP_SEND) { - zlog_debug("%s: >> netlink message dump [sent]", __func__); - zlog_hexdump(n, n->nlmsg_len); - } - - if (status < 0) { - flog_err_sys(EC_LIB_SOCKET, "netlink_talk sendmsg() error: %s", - safe_strerror(save_errno)); + if (netlink_send_msg(nl, n, n->nlmsg_len) == -1) return -1; - } /* * Get reply from netlink socket. 
@@ -1047,8 +1061,6 @@ int netlink_talk(int (*filter)(struct nlmsghdr *, ns_id_t, int startup), */ int netlink_request(struct nlsock *nl, void *req) { - int ret; - struct sockaddr_nl snl; struct nlmsghdr *n = (struct nlmsghdr *)req; /* Check netlink socket. */ @@ -1062,20 +1074,8 @@ int netlink_request(struct nlsock *nl, void *req) n->nlmsg_pid = nl->snl.nl_pid; n->nlmsg_seq = ++nl->seq; - memset(&snl, 0, sizeof(snl)); - snl.nl_family = AF_NETLINK; - - /* Raise capabilities and send message, then lower capabilities. */ - frr_with_privs(&zserv_privs) { - ret = sendto(nl->sock, req, n->nlmsg_len, 0, - (struct sockaddr *)&snl, sizeof(snl)); - } - - if (ret < 0) { - zlog_err("%s sendto failed: %s", nl->name, - safe_strerror(errno)); + if (netlink_send_msg(nl, req, n->nlmsg_len) == -1) return -1; - } return 0; } diff --git a/zebra/rib.h b/zebra/rib.h index a024b6dfaa..ec992974fa 100644 --- a/zebra/rib.h +++ b/zebra/rib.h @@ -94,9 +94,11 @@ struct route_entry { struct nhg_hash_entry *nhe; /* Nexthop group from FIB (optional), reflecting what is actually - * installed in the FIB if that differs. + * installed in the FIB if that differs. The 'backup' group is used + * when backup nexthops are present in the route's nhg. */ struct nexthop_group fib_ng; + struct nexthop_group fib_backup_ng; /* Nexthop group hash entry ID */ uint32_t nhe_id; @@ -526,7 +528,7 @@ DECLARE_HOOK(rib_update, (struct route_node * rn, const char *reason), /* * Access active nexthop-group, either RIB or FIB version */ -static inline struct nexthop_group *rib_active_nhg(struct route_entry *re) +static inline struct nexthop_group *rib_get_fib_nhg(struct route_entry *re) { if (re->fib_ng.nexthop) return &(re->fib_ng); @@ -534,6 +536,18 @@ static inline struct nexthop_group *rib_active_nhg(struct route_entry *re) return &(re->nhe->nhg); } +/* + * Access active nexthop-group, either RIB or FIB version + */ +static inline struct nexthop_group *rib_get_fib_backup_nhg( + struct route_entry *re) +{ + if (re->fib_backup_ng.nexthop) + return &(re->fib_backup_ng); + else + return zebra_nhg_get_backup_nhg(re->nhe); +} + extern void zebra_vty_init(void); extern pid_t pid; diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c index 9883e73876..40a7eeba8e 100644 --- a/zebra/rt_netlink.c +++ b/zebra/rt_netlink.c @@ -1713,7 +1713,7 @@ ssize_t netlink_route_multipath_msg_encode(int cmd, nl_attr_nest_end(&req->n, nest); } - if (kernel_nexthops_supported() || force_nhg) { + if ((!fpm && kernel_nexthops_supported()) || (fpm && force_nhg)) { /* Kernel supports nexthop objects */ if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug("%s: %pFX nhg_id is %u", __func__, p, diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c index a40aa8b643..dc7c595d26 100644 --- a/zebra/zapi_msg.c +++ b/zebra/zapi_msg.c @@ -2485,7 +2485,7 @@ static void zread_vrf_label(ZAPI_HANDLER_ARGS) if (really_remove) mpls_lsp_uninstall(def_zvrf, ltype, zvrf->label[afi], NEXTHOP_TYPE_IFINDEX, NULL, - ifp->ifindex); + ifp->ifindex, false /*backup*/); } if (nlabel != MPLS_LABEL_NONE) { diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c index 64383fc81c..e34b6f23ff 100644 --- a/zebra/zebra_dplane.c +++ b/zebra/zebra_dplane.c @@ -1149,6 +1149,37 @@ void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh) nexthop_group_copy_nh_sorted(&(ctx->u.rinfo.zd_ng), nh); } +/* + * Set the list of backup nexthops; their ordering is preserved (they're not + * re-sorted.) 
+ */ +void dplane_ctx_set_backup_nhg(struct zebra_dplane_ctx *ctx, + const struct nexthop_group *nhg) +{ + struct nexthop *nh, *last_nh, *nexthop; + + DPLANE_CTX_VALID(ctx); + + if (ctx->u.rinfo.backup_ng.nexthop) { + nexthops_free(ctx->u.rinfo.backup_ng.nexthop); + ctx->u.rinfo.backup_ng.nexthop = NULL; + } + + last_nh = NULL; + + /* Be careful to preserve the order of the backup list */ + for (nh = nhg->nexthop; nh; nh = nh->next) { + nexthop = nexthop_dup(nh, NULL); + + if (last_nh) + NEXTHOP_APPEND(last_nh, nexthop); + else + ctx->u.rinfo.backup_ng.nexthop = nexthop; + + last_nh = nexthop; + } +} + uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx *ctx) { DPLANE_CTX_VALID(ctx); @@ -1303,7 +1334,7 @@ const struct nhlfe_list_head *dplane_ctx_get_backup_nhlfe_list( zebra_nhlfe_t *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx, enum lsp_types_t lsp_type, enum nexthop_types_t nh_type, - union g_addr *gate, + const union g_addr *gate, ifindex_t ifindex, uint8_t num_labels, mpls_label_t *out_labels) @@ -1322,7 +1353,7 @@ zebra_nhlfe_t *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx, zebra_nhlfe_t *dplane_ctx_add_backup_nhlfe(struct zebra_dplane_ctx *ctx, enum lsp_types_t lsp_type, enum nexthop_types_t nh_type, - union g_addr *gate, + const union g_addr *gate, ifindex_t ifindex, uint8_t num_labels, mpls_label_t *out_labels) @@ -1921,18 +1952,12 @@ done: /* * Capture information for an LSP update in a dplane context. */ -static int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx, - enum dplane_op_e op, - zebra_lsp_t *lsp) +int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op, + zebra_lsp_t *lsp) { int ret = AOK; zebra_nhlfe_t *nhlfe, *new_nhlfe; - if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) - zlog_debug("init dplane ctx %s: in-label %u ecmp# %d", - dplane_op2str(op), lsp->ile.in_label, - lsp->num_ecmp); - ctx->zd_op = op; ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS; @@ -1944,6 +1969,20 @@ static int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx, nhlfe_list_init(&(ctx->u.lsp.nhlfe_list)); nhlfe_list_init(&(ctx->u.lsp.backup_nhlfe_list)); + + /* This may be called to create/init a dplane context, not necessarily + * to copy an lsp object. 
+ */ + if (lsp == NULL) { + ret = AOK; + goto done; + } + + if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) + zlog_debug("init dplane ctx %s: in-label %u ecmp# %d", + dplane_op2str(op), lsp->ile.in_label, + lsp->num_ecmp); + ctx->u.lsp.ile = lsp->ile; ctx->u.lsp.addr_family = lsp->addr_family; ctx->u.lsp.num_ecmp = lsp->num_ecmp; @@ -2012,6 +2051,7 @@ static int dplane_ctx_pw_init(struct zebra_dplane_ctx *ctx, struct route_table *table; struct route_node *rn; struct route_entry *re; + const struct nexthop_group *nhg; if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) zlog_debug("init dplane ctx %s: pw '%s', loc %u, rem %u", @@ -2062,10 +2102,11 @@ static int dplane_ctx_pw_init(struct zebra_dplane_ctx *ctx, break; } - if (re) + if (re) { + nhg = rib_get_fib_nhg(re); copy_nexthops(&(ctx->u.pw.nhg.nexthop), - re->nhe->nhg.nexthop, NULL); - + nhg->nexthop, NULL); + } route_unlock_node(rn); } } @@ -2442,7 +2483,7 @@ dplane_route_notif_update(struct route_node *rn, new_ctx->u.rinfo.zd_ng.nexthop = NULL; copy_nexthops(&(new_ctx->u.rinfo.zd_ng.nexthop), - (rib_active_nhg(re))->nexthop, NULL); + (rib_get_fib_nhg(re))->nexthop, NULL); for (ALL_NEXTHOPS(new_ctx->u.rinfo.zd_ng, nexthop)) UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB); diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h index 9e07231fea..8e873886df 100644 --- a/zebra/zebra_dplane.h +++ b/zebra/zebra_dplane.h @@ -283,6 +283,8 @@ void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance); uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx); void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh); +void dplane_ctx_set_backup_nhg(struct zebra_dplane_ctx *ctx, + const struct nexthop_group *nhg); uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx *ctx); const struct nexthop_group *dplane_ctx_get_ng( @@ -308,6 +310,14 @@ dplane_ctx_get_nhe_nh_grp(const struct zebra_dplane_ctx *ctx); uint8_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx *ctx); /* Accessors for LSP information */ + +/* Init the internal LSP data struct - necessary before adding to it. + * If 'lsp' is non-NULL, info will be copied from it to the internal + * context data area. 
+ */ +int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op, + zebra_lsp_t *lsp); + mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx); void dplane_ctx_set_in_label(struct zebra_dplane_ctx *ctx, mpls_label_t label); @@ -325,7 +335,7 @@ const struct nhlfe_list_head *dplane_ctx_get_backup_nhlfe_list( zebra_nhlfe_t *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx, enum lsp_types_t lsp_type, enum nexthop_types_t nh_type, - union g_addr *gate, + const union g_addr *gate, ifindex_t ifindex, uint8_t num_labels, mpls_label_t *out_labels); @@ -333,7 +343,7 @@ zebra_nhlfe_t *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx, zebra_nhlfe_t *dplane_ctx_add_backup_nhlfe(struct zebra_dplane_ctx *ctx, enum lsp_types_t lsp_type, enum nexthop_types_t nh_type, - union g_addr *gate, + const union g_addr *gate, ifindex_t ifindex, uint8_t num_labels, mpls_label_t *out_labels); diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c index 8ee8601689..e741268ebb 100644 --- a/zebra/zebra_mpls.c +++ b/zebra/zebra_mpls.c @@ -110,17 +110,14 @@ static zebra_nhlfe_t *nhlfe_find(struct nhlfe_list_head *list, static zebra_nhlfe_t *nhlfe_add(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, enum nexthop_types_t gtype, const union g_addr *gate, ifindex_t ifindex, - uint8_t num_labels, const mpls_label_t *labels); + uint8_t num_labels, const mpls_label_t *labels, + bool is_backup); static int nhlfe_del(zebra_nhlfe_t *nhlfe); static void nhlfe_free(zebra_nhlfe_t *nhlfe); static void nhlfe_out_label_update(zebra_nhlfe_t *nhlfe, struct mpls_label_stack *nh_label); static int mpls_lsp_uninstall_all(struct hash *lsp_table, zebra_lsp_t *lsp, enum lsp_types_t type); -static int lsp_backup_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, - mpls_label_t in_label, - enum nexthop_types_t gtype, - const union g_addr *gate, ifindex_t ifindex); static int mpls_static_lsp_uninstall_all(struct zebra_vrf *zvrf, mpls_label_t in_label); static void nhlfe_print(zebra_nhlfe_t *nhlfe, struct vty *vty); @@ -167,6 +164,15 @@ static void clear_nhlfe_installed(zebra_lsp_t *lsp) UNSET_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED); UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB); } + + frr_each_safe(nhlfe_list, &lsp->backup_nhlfe_list, nhlfe) { + nexthop = nhlfe->nexthop; + if (!nexthop) + continue; + + UNSET_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED); + UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB); + } } /* @@ -240,7 +246,8 @@ static int lsp_install(struct zebra_vrf *zvrf, mpls_label_t label, nhlfe = nhlfe_add(lsp, lsp_type, nexthop->type, &nexthop->gate, nexthop->ifindex, nexthop->nh_label->num_labels, - nexthop->nh_label->label); + nexthop->nh_label->label, + false /*backup*/); if (!nhlfe) return -1; @@ -797,8 +804,7 @@ static void lsp_select_best_nhlfe(zebra_lsp_t *lsp) /* * First compute the best path, after checking nexthop status. We are - * only - * concerned with non-deleted NHLFEs. + * only concerned with non-deleted NHLFEs. */ frr_each_safe(nhlfe_list, &lsp->nhlfe_list, nhlfe) { /* Clear selection flags. */ @@ -816,6 +822,14 @@ static void lsp_select_best_nhlfe(zebra_lsp_t *lsp) if (!lsp->best_nhlfe) return; + /* + * Check the active status of backup nhlfes also + */ + frr_each_safe(nhlfe_list, &lsp->backup_nhlfe_list, nhlfe) { + if (!CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_DELETED)) + (void)nhlfe_nexthop_active(nhlfe); + } + /* Mark best NHLFE as selected. 
*/ SET_FLAG(lsp->best_nhlfe->flags, NHLFE_FLAG_SELECTED); @@ -910,9 +924,9 @@ static wq_item_status lsp_process(struct work_queue *wq, void *data) if (IS_ZEBRA_DEBUG_MPLS) { if (oldbest) - nhlfe2str(oldbest, buf, BUFSIZ); + nhlfe2str(oldbest, buf, sizeof(buf)); if (newbest) - nhlfe2str(newbest, buf2, BUFSIZ); + nhlfe2str(newbest, buf2, sizeof(buf2)); zlog_debug( "Process LSP in-label %u oldbest %s newbest %s " "flags 0x%x ecmp# %d", @@ -1310,13 +1324,14 @@ static zebra_nhlfe_t *nhlfe_alloc(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, } /* - * Add NHLFE. Base entry must have been created and duplicate - * check done. + * Add primary or backup NHLFE. Base entry must have been created and + * duplicate check done. */ static zebra_nhlfe_t *nhlfe_add(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, enum nexthop_types_t gtype, const union g_addr *gate, ifindex_t ifindex, - uint8_t num_labels, const mpls_label_t *labels) + uint8_t num_labels, const mpls_label_t *labels, + bool is_backup) { zebra_nhlfe_t *nhlfe; @@ -1327,36 +1342,12 @@ static zebra_nhlfe_t *nhlfe_add(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, nhlfe = nhlfe_alloc(lsp, lsp_type, gtype, gate, ifindex, num_labels, labels); - /* Enqueue to LSP, at head of list. */ - nhlfe_list_add_head(&lsp->nhlfe_list, nhlfe); - - return nhlfe; -} - -/* - * Add backup NHLFE. Base entry must have been created and duplicate - * check done. - */ -static zebra_nhlfe_t *nhlfe_backup_add(zebra_lsp_t *lsp, - enum lsp_types_t lsp_type, - enum nexthop_types_t gtype, - const union g_addr *gate, - ifindex_t ifindex, uint8_t num_labels, - const mpls_label_t *labels) -{ - zebra_nhlfe_t *nhlfe; - - if (!lsp) - return NULL; - - /* Allocate new object */ - nhlfe = nhlfe_alloc(lsp, lsp_type, gtype, gate, ifindex, num_labels, - labels); - - SET_FLAG(nhlfe->flags, NHLFE_FLAG_IS_BACKUP); - - /* Enqueue to LSP, at tail of list. */ - nhlfe_list_add_tail(&lsp->backup_nhlfe_list, nhlfe); + /* Enqueue to LSP: primaries at head of list, backups at tail */ + if (is_backup) { + SET_FLAG(nhlfe->flags, NHLFE_FLAG_IS_BACKUP); + nhlfe_list_add_tail(&lsp->backup_nhlfe_list, nhlfe); + } else + nhlfe_list_add_head(&lsp->nhlfe_list, nhlfe); return nhlfe; } @@ -1590,6 +1581,9 @@ static void nhlfe_print(zebra_nhlfe_t *nhlfe, struct vty *vty) break; } vty_out(vty, "%s", + CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_IS_BACKUP) ? " (backup)" + : ""); + vty_out(vty, "%s", CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED) ? 
" (installed)" : ""); vty_out(vty, "\n"); @@ -1616,6 +1610,7 @@ static void lsp_print(struct vty *vty, zebra_lsp_t *lsp) /* Find backup in backup list */ i = 0; + backup = NULL; frr_each(nhlfe_list, &lsp->backup_nhlfe_list, backup) { if (i == nhlfe->nexthop->backup_idx) break; @@ -1933,23 +1928,27 @@ void zebra_mpls_lsp_dplane_result(struct zebra_dplane_ctx *ctx) /* TODO -- Confirm that this result is still 'current' */ - if (status == ZEBRA_DPLANE_REQUEST_SUCCESS) { - /* Update zebra object */ - SET_FLAG(lsp->flags, LSP_FLAG_INSTALLED); - frr_each(nhlfe_list, &lsp->nhlfe_list, nhlfe) { - nexthop = nhlfe->nexthop; - if (!nexthop) - continue; - - SET_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED); - SET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB); - } - } else { + if (status != ZEBRA_DPLANE_REQUEST_SUCCESS) { UNSET_FLAG(lsp->flags, LSP_FLAG_INSTALLED); clear_nhlfe_installed(lsp); flog_warn(EC_ZEBRA_LSP_INSTALL_FAILURE, "LSP Install Failure: in-label %u", lsp->ile.in_label); + break; + } + + /* Update zebra object */ + SET_FLAG(lsp->flags, LSP_FLAG_INSTALLED); + frr_each(nhlfe_list, &lsp->nhlfe_list, nhlfe) { + nexthop = nhlfe->nexthop; + if (!nexthop) + continue; + + if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_SELECTED) && + CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) { + SET_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED); + SET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB); + } } break; @@ -1970,53 +1969,23 @@ void zebra_mpls_lsp_dplane_result(struct zebra_dplane_ctx *ctx) } /* - * Process async dplane notifications. + * Process LSP installation info from two sets of nhlfes: a set from + * a dplane notification, and a set from the zebra LSP object. Update + * counters of installed nexthops, and return whether the LSP has changed. */ -void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx) +static bool compare_notif_nhlfes(const struct nhlfe_list_head *ctx_head, + struct nhlfe_list_head *nhlfe_head, + int *start_counter, int *end_counter) { - struct zebra_vrf *zvrf; - zebra_ile_t tmp_ile; - struct hash *lsp_table; - zebra_lsp_t *lsp; zebra_nhlfe_t *nhlfe; - const struct nhlfe_list_head *head; const zebra_nhlfe_t *ctx_nhlfe; struct nexthop *nexthop; const struct nexthop *ctx_nexthop; - int start_count = 0, end_count = 0; /* Installed counts */ + int start_count = 0, end_count = 0; bool changed_p = false; bool is_debug = (IS_ZEBRA_DEBUG_DPLANE | IS_ZEBRA_DEBUG_MPLS); - if (is_debug) - zlog_debug("LSP dplane notif, in-label %u", - dplane_ctx_get_in_label(ctx)); - - /* Look for zebra LSP object */ - zvrf = vrf_info_lookup(VRF_DEFAULT); - if (zvrf == NULL) - goto done; - - lsp_table = zvrf->lsp_table; - - tmp_ile.in_label = dplane_ctx_get_in_label(ctx); - lsp = hash_lookup(lsp_table, &tmp_ile); - if (lsp == NULL) { - if (is_debug) - zlog_debug("dplane LSP notif: in-label %u not found", - dplane_ctx_get_in_label(ctx)); - goto done; - } - - /* - * The dataplane/forwarding plane is notifying zebra about the state - * of the nexthops associated with this LSP. First, we take a - * pre-scan pass to determine whether the LSP has transitioned - * from installed -> uninstalled. In that case, we need to have - * the existing state of the LSP objects available before making - * any changes. 
- */ - head = dplane_ctx_get_nhlfe_list(ctx); - frr_each_safe(nhlfe_list, &lsp->nhlfe_list, nhlfe) { + frr_each_safe(nhlfe_list, nhlfe_head, nhlfe) { char buf[NEXTHOP_STRLEN]; nexthop = nhlfe->nexthop; @@ -2026,8 +1995,9 @@ void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx) if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)) start_count++; + ctx_nhlfe = NULL; ctx_nexthop = NULL; - frr_each(nhlfe_list_const, head, ctx_nhlfe) { + frr_each(nhlfe_list_const, ctx_head, ctx_nhlfe) { ctx_nexthop = ctx_nhlfe->nexthop; if (!ctx_nexthop) continue; @@ -2085,32 +2055,39 @@ void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx) } } - if (is_debug) - zlog_debug("LSP dplane notif: lfib start_count %d, end_count %d%s", - start_count, end_count, - changed_p ? ", changed" : ""); + if (start_counter) + *start_counter += start_count; + if (end_counter) + *end_counter += end_count; - /* - * Has the LSP become uninstalled? - */ - if (start_count > 0 && end_count == 0) { - /* Inform other lfibs */ - dplane_lsp_notif_update(lsp, DPLANE_OP_LSP_DELETE, ctx); - } + return changed_p; +} - /* - * Now we take a second pass and bring the zebra - * nexthop state into sync with the forwarding-plane state. - */ - frr_each_safe(nhlfe_list, &lsp->nhlfe_list, nhlfe) { +/* + * Update an lsp nhlfe list from a dplane context, typically an async + * notification context. Update the LSP list to match the installed + * status from the context's list. + */ +static int update_nhlfes_from_ctx(struct nhlfe_list_head *nhlfe_head, + const struct nhlfe_list_head *ctx_head) +{ + int ret = 0; + zebra_nhlfe_t *nhlfe; + const zebra_nhlfe_t *ctx_nhlfe; + struct nexthop *nexthop; + const struct nexthop *ctx_nexthop; + bool is_debug = (IS_ZEBRA_DEBUG_DPLANE | IS_ZEBRA_DEBUG_MPLS); + + frr_each_safe(nhlfe_list, nhlfe_head, nhlfe) { char buf[NEXTHOP_STRLEN]; nexthop = nhlfe->nexthop; if (!nexthop) continue; + ctx_nhlfe = NULL; ctx_nexthop = NULL; - frr_each(nhlfe_list_const, head, ctx_nhlfe) { + frr_each(nhlfe_list_const, ctx_head, ctx_nhlfe) { ctx_nexthop = ctx_nhlfe->nexthop; if (!ctx_nexthop) continue; @@ -2130,10 +2107,16 @@ void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx) /* Bring zebra nhlfe install state into sync */ if (CHECK_FLAG(ctx_nhlfe->flags, NHLFE_FLAG_INSTALLED)) { + if (is_debug) + zlog_debug("%s: matched lsp nhlfe %s (installed)", + __func__, buf); SET_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED); } else { + if (is_debug) + zlog_debug("%s: matched lsp nhlfe %s (not installed)", + __func__, buf); UNSET_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED); } @@ -2153,13 +2136,101 @@ void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx) } else { /* Not mentioned in lfib set -> uninstalled */ - + if (is_debug) + zlog_debug("%s: no match for lsp nhlfe %s", + __func__, buf); UNSET_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED); UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB); UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE); } } + return ret; +} + +/* + * Process async dplane notifications. 
+ */ +void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx) +{ + struct zebra_vrf *zvrf; + zebra_ile_t tmp_ile; + struct hash *lsp_table; + zebra_lsp_t *lsp; + const struct nhlfe_list_head *ctx_list; + int start_count = 0, end_count = 0; /* Installed counts */ + bool changed_p = false; + bool is_debug = (IS_ZEBRA_DEBUG_DPLANE | IS_ZEBRA_DEBUG_MPLS); + + if (is_debug) + zlog_debug("LSP dplane notif, in-label %u", + dplane_ctx_get_in_label(ctx)); + + /* Look for zebra LSP object */ + zvrf = vrf_info_lookup(VRF_DEFAULT); + if (zvrf == NULL) + goto done; + + lsp_table = zvrf->lsp_table; + + tmp_ile.in_label = dplane_ctx_get_in_label(ctx); + lsp = hash_lookup(lsp_table, &tmp_ile); + if (lsp == NULL) { + if (is_debug) + zlog_debug("dplane LSP notif: in-label %u not found", + dplane_ctx_get_in_label(ctx)); + goto done; + } + + /* + * The dataplane/forwarding plane is notifying zebra about the state + * of the nexthops associated with this LSP. First, we take a + * pre-scan pass to determine whether the LSP has transitioned + * from installed -> uninstalled. In that case, we need to have + * the existing state of the LSP objects available before making + * any changes. + */ + ctx_list = dplane_ctx_get_nhlfe_list(ctx); + + changed_p = compare_notif_nhlfes(ctx_list, &lsp->nhlfe_list, + &start_count, &end_count); + + if (is_debug) + zlog_debug("LSP dplane notif: lfib start_count %d, end_count %d%s", + start_count, end_count, + changed_p ? ", changed" : ""); + + ctx_list = dplane_ctx_get_backup_nhlfe_list(ctx); + + if (compare_notif_nhlfes(ctx_list, &lsp->backup_nhlfe_list, + &start_count, &end_count)) + /* Avoid accidentally setting back to 'false' */ + changed_p = true; + + if (is_debug) + zlog_debug("LSP dplane notif: lfib backups, start_count %d, end_count %d%s", + start_count, end_count, + changed_p ? ", changed" : ""); + + /* + * Has the LSP become uninstalled? We need the existing state of the + * nexthops/nhlfes at this point so we know what to delete. + */ + if (start_count > 0 && end_count == 0) { + /* Inform other lfibs */ + dplane_lsp_notif_update(lsp, DPLANE_OP_LSP_DELETE, ctx); + } + + /* + * Now we take a second pass and bring the zebra + * nexthop state into sync with the forwarding-plane state. 
+ */ + ctx_list = dplane_ctx_get_nhlfe_list(ctx); + update_nhlfes_from_ctx(&lsp->nhlfe_list, ctx_list); + + ctx_list = dplane_ctx_get_backup_nhlfe_list(ctx); + update_nhlfes_from_ctx(&lsp->backup_nhlfe_list, ctx_list); + if (end_count > 0) { SET_FLAG(lsp->flags, LSP_FLAG_INSTALLED); @@ -2234,14 +2305,14 @@ int zebra_mpls_lsp_uninstall(struct zebra_vrf *zvrf, struct route_node *rn, zebra_nhlfe_t *zebra_mpls_lsp_add_nhlfe(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, enum nexthop_types_t gtype, - union g_addr *gate, + const union g_addr *gate, ifindex_t ifindex, uint8_t num_labels, const mpls_label_t *out_labels) { /* Just a public pass-through to the internal implementation */ return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, num_labels, - out_labels); + out_labels, false /*backup*/); } /* @@ -2252,14 +2323,14 @@ zebra_nhlfe_t *zebra_mpls_lsp_add_nhlfe(zebra_lsp_t *lsp, zebra_nhlfe_t *zebra_mpls_lsp_add_backup_nhlfe(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, enum nexthop_types_t gtype, - union g_addr *gate, + const union g_addr *gate, ifindex_t ifindex, uint8_t num_labels, const mpls_label_t *out_labels) { /* Just a public pass-through to the internal implementation */ - return nhlfe_backup_add(lsp, lsp_type, gtype, gate, ifindex, num_labels, - out_labels); + return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, num_labels, + out_labels, true); } /* @@ -2275,7 +2346,8 @@ zebra_nhlfe_t *zebra_mpls_lsp_add_nh(zebra_lsp_t *lsp, return NULL; nhlfe = nhlfe_add(lsp, lsp_type, nh->type, &nh->gate, nh->ifindex, - nh->nh_label->num_labels, nh->nh_label->label); + nh->nh_label->num_labels, nh->nh_label->label, + false /*backup*/); return nhlfe; } @@ -2293,9 +2365,9 @@ zebra_nhlfe_t *zebra_mpls_lsp_add_backup_nh(zebra_lsp_t *lsp, if (nh->nh_label == NULL || nh->nh_label->num_labels == 0) return NULL; - nhlfe = nhlfe_backup_add(lsp, lsp_type, nh->type, &nh->gate, + nhlfe = nhlfe_add(lsp, lsp_type, nh->type, &nh->gate, nh->ifindex, nh->nh_label->num_labels, - nh->nh_label->label); + nh->nh_label->label, true); return nhlfe; } @@ -2846,6 +2918,9 @@ int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, SET_FLAG(re->status, ROUTE_ENTRY_CHANGED); SET_FLAG(re->status, ROUTE_ENTRY_LABELS_CHANGED); + /* This will create (or ref) a new nhe, so we will discard the local + * temporary nhe + */ mpls_zebra_nhe_update(re, afi, new_nhe); zebra_nhg_free(new_nhe); @@ -3013,7 +3088,8 @@ int mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf, else ret = mpls_lsp_uninstall(zvrf, zl->type, zl->local_label, znh->type, - &znh->gate, znh->ifindex); + &znh->gate, znh->ifindex, + false); if (ret < 0) { if (IS_ZEBRA_DEBUG_RECV || IS_ZEBRA_DEBUG_MPLS) { zapi_nexthop2str(znh, buf, sizeof(buf)); @@ -3055,10 +3131,10 @@ int mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf, if (add_p) ret = lsp_backup_znh_install(lsp, zl->type, znh); else - ret = lsp_backup_uninstall(zvrf, zl->type, - zl->local_label, - znh->type, &znh->gate, - znh->ifindex); + ret = mpls_lsp_uninstall(zvrf, zl->type, + zl->local_label, + znh->type, &znh->gate, + znh->ifindex, true); if (ret < 0) { if (IS_ZEBRA_DEBUG_RECV || @@ -3123,92 +3199,22 @@ static zebra_nhlfe_t * lsp_add_nhlfe(zebra_lsp_t *lsp, enum lsp_types_t type, uint8_t num_out_labels, const mpls_label_t *out_labels, enum nexthop_types_t gtype, const union g_addr *gate, - ifindex_t ifindex) + ifindex_t ifindex, bool is_backup) { zebra_nhlfe_t *nhlfe; char buf[MPLS_LABEL_STRLEN]; + const char *backup_str; - nhlfe = nhlfe_find(&lsp->nhlfe_list, type, gtype, gate, 
ifindex); - if (nhlfe) { - struct nexthop *nh = nhlfe->nexthop; - - assert(nh); - assert(nh->nh_label); - - /* Clear deleted flag (in case it was set) */ - UNSET_FLAG(nhlfe->flags, NHLFE_FLAG_DELETED); - if (nh->nh_label->num_labels == num_out_labels - && !memcmp(nh->nh_label->label, out_labels, - sizeof(mpls_label_t) * num_out_labels)) - /* No change */ - return nhlfe; - - if (IS_ZEBRA_DEBUG_MPLS) { - char buf2[MPLS_LABEL_STRLEN]; - char buf3[MPLS_LABEL_STRLEN]; - - nhlfe2str(nhlfe, buf, sizeof(buf)); - mpls_label2str(num_out_labels, out_labels, buf2, - sizeof(buf2), 0); - mpls_label2str(nh->nh_label->num_labels, - nh->nh_label->label, buf3, sizeof(buf3), - 0); - - zlog_debug("LSP in-label %u type %d nexthop %s out-label(s) changed to %s (old %s)", - lsp->ile.in_label, type, buf, buf2, buf3); - } - - /* Update out label(s), trigger processing. */ - if (nh->nh_label->num_labels == num_out_labels) - memcpy(nh->nh_label->label, out_labels, - sizeof(mpls_label_t) * num_out_labels); - else { - nexthop_del_labels(nh); - nexthop_add_labels(nh, type, num_out_labels, - out_labels); - } + if (is_backup) { + nhlfe = nhlfe_find(&lsp->backup_nhlfe_list, type, gtype, + gate, ifindex); + backup_str = "backup "; } else { - /* Add LSP entry to this nexthop */ - nhlfe = nhlfe_add(lsp, type, gtype, gate, ifindex, - num_out_labels, out_labels); - if (!nhlfe) - return NULL; - - if (IS_ZEBRA_DEBUG_MPLS) { - char buf2[MPLS_LABEL_STRLEN]; - - nhlfe2str(nhlfe, buf, sizeof(buf)); - mpls_label2str(num_out_labels, out_labels, buf2, - sizeof(buf2), 0); - - zlog_debug("Add LSP in-label %u type %d nexthop %s out-label(s) %s", - lsp->ile.in_label, type, buf, buf2); - } - - lsp->addr_family = NHLFE_FAMILY(nhlfe); + nhlfe = nhlfe_find(&lsp->nhlfe_list, type, gtype, gate, + ifindex); + backup_str = ""; } - /* Mark NHLFE, queue LSP for processing. */ - SET_FLAG(nhlfe->flags, NHLFE_FLAG_CHANGED); - - return nhlfe; -} - -/* - * Install/update a NHLFE for an LSP in the forwarding table. This may be - * a new LSP entry or a new NHLFE for an existing in-label or an update of - * the out-label for an existing NHLFE (update case). - */ -static zebra_nhlfe_t * -lsp_add_backup_nhlfe(zebra_lsp_t *lsp, enum lsp_types_t type, - uint8_t num_out_labels, const mpls_label_t *out_labels, - enum nexthop_types_t gtype, const union g_addr *gate, - ifindex_t ifindex) -{ - zebra_nhlfe_t *nhlfe; - char buf[MPLS_LABEL_STRLEN]; - - nhlfe = nhlfe_find(&lsp->backup_nhlfe_list, type, gtype, gate, ifindex); if (nhlfe) { struct nexthop *nh = nhlfe->nexthop; @@ -3234,8 +3240,9 @@ lsp_add_backup_nhlfe(zebra_lsp_t *lsp, enum lsp_types_t type, nh->nh_label->label, buf3, sizeof(buf3), 0); - zlog_debug("LSP in-label %u type %d backup nexthop %s out-label(s) changed to %s (old %s)", - lsp->ile.in_label, type, buf, buf2, buf3); + zlog_debug("LSP in-label %u type %d %snexthop %s out-label(s) changed to %s (old %s)", + lsp->ile.in_label, type, backup_str, buf, + buf2, buf3); } /* Update out label(s), trigger processing. 
*/ @@ -3249,8 +3256,8 @@ lsp_add_backup_nhlfe(zebra_lsp_t *lsp, enum lsp_types_t type, } } else { /* Add LSP entry to this nexthop */ - nhlfe = nhlfe_backup_add(lsp, type, gtype, gate, ifindex, - num_out_labels, out_labels); + nhlfe = nhlfe_add(lsp, type, gtype, gate, ifindex, + num_out_labels, out_labels, is_backup); if (!nhlfe) return NULL; @@ -3261,8 +3268,9 @@ lsp_add_backup_nhlfe(zebra_lsp_t *lsp, enum lsp_types_t type, mpls_label2str(num_out_labels, out_labels, buf2, sizeof(buf2), 0); - zlog_debug("Add LSP in-label %u type %d backup nexthop %s out-label(s) %s", - lsp->ile.in_label, type, buf, buf2); + zlog_debug("Add LSP in-label %u type %d %snexthop %s out-label(s) %s", + lsp->ile.in_label, type, backup_str, buf, + buf2); } lsp->addr_family = NHLFE_FAMILY(nhlfe); @@ -3300,7 +3308,7 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type, return -1; nhlfe = lsp_add_nhlfe(lsp, type, num_out_labels, out_labels, gtype, - gate, ifindex); + gate, ifindex, false /*backup*/); if (nhlfe == NULL) return -1; @@ -3320,7 +3328,8 @@ static int lsp_znh_install(zebra_lsp_t *lsp, enum lsp_types_t type, zebra_nhlfe_t *nhlfe; nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num, znh->labels, - znh->type, &znh->gate, znh->ifindex); + znh->type, &znh->gate, znh->ifindex, + false /*backup*/); if (nhlfe == NULL) return -1; @@ -3345,9 +3354,9 @@ static int lsp_backup_znh_install(zebra_lsp_t *lsp, enum lsp_types_t type, { zebra_nhlfe_t *nhlfe; - nhlfe = lsp_add_backup_nhlfe(lsp, type, znh->label_num, - znh->labels, znh->type, &znh->gate, - znh->ifindex); + nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num, + znh->labels, znh->type, &znh->gate, + znh->ifindex, true /*backup*/); if (nhlfe == NULL) { if (IS_ZEBRA_DEBUG_MPLS) zlog_debug("%s: unable to add backup nhlfe, label: %u", @@ -3368,13 +3377,14 @@ static int lsp_backup_znh_install(zebra_lsp_t *lsp, enum lsp_types_t type, */ int mpls_lsp_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, mpls_label_t in_label, enum nexthop_types_t gtype, - const union g_addr *gate, ifindex_t ifindex) + const union g_addr *gate, ifindex_t ifindex, + bool backup_p) { struct hash *lsp_table; zebra_ile_t tmp_ile; zebra_lsp_t *lsp; zebra_nhlfe_t *nhlfe; - char buf[BUFSIZ]; + char buf[NEXTHOP_STRLEN]; bool schedule_lsp = false; /* Lookup table. */ @@ -3387,7 +3397,13 @@ int mpls_lsp_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, lsp = hash_lookup(lsp_table, &tmp_ile); if (!lsp) return 0; - nhlfe = nhlfe_find(&lsp->nhlfe_list, type, gtype, gate, ifindex); + + if (backup_p) + nhlfe = nhlfe_find(&lsp->backup_nhlfe_list, type, gtype, + gate, ifindex); + else + nhlfe = nhlfe_find(&lsp->nhlfe_list, type, gtype, gate, + ifindex); if (!nhlfe) return 0; @@ -3420,56 +3436,6 @@ int mpls_lsp_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, return 0; } -/* - * Uninstall a particular NHLFE in the forwarding table. If this is - * the only NHLFE, the entire LSP forwarding entry has to be deleted. - */ -static int lsp_backup_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, - mpls_label_t in_label, - enum nexthop_types_t gtype, - const union g_addr *gate, ifindex_t ifindex) -{ - struct hash *lsp_table; - zebra_ile_t tmp_ile; - zebra_lsp_t *lsp; - zebra_nhlfe_t *nhlfe; - char buf[BUFSIZ]; - - /* Lookup table. */ - lsp_table = zvrf->lsp_table; - if (!lsp_table) - return -1; - - /* If entry is not present, exit. 
*/ - tmp_ile.in_label = in_label; - lsp = hash_lookup(lsp_table, &tmp_ile); - if (!lsp) - return 0; - nhlfe = nhlfe_find(&lsp->backup_nhlfe_list, type, gtype, gate, ifindex); - if (!nhlfe) - return 0; - - if (IS_ZEBRA_DEBUG_MPLS) { - nhlfe2str(nhlfe, buf, BUFSIZ); - zlog_debug("Del backup LSP in-label %u type %d nexthop %s flags 0x%x", - in_label, type, buf, nhlfe->flags); - } - - /* Mark NHLFE for delete or directly delete, as appropriate. */ - if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED)) { - UNSET_FLAG(nhlfe->flags, NHLFE_FLAG_CHANGED); - SET_FLAG(nhlfe->flags, NHLFE_FLAG_DELETED); - if (lsp_processq_add(lsp)) - return -1; - } else { - nhlfe_del(nhlfe); - - /* Free LSP entry if no other NHLFEs and not scheduled. */ - lsp_check_free(lsp_table, &lsp); - } - return 0; -} - int mpls_lsp_uninstall_all_vrf(struct zebra_vrf *zvrf, enum lsp_types_t type, mpls_label_t in_label) { @@ -3754,7 +3720,7 @@ int zebra_mpls_static_lsp_del(struct zebra_vrf *zvrf, mpls_label_t in_label, /* Uninstall LSP from the main table. */ mpls_lsp_uninstall(zvrf, ZEBRA_LSP_STATIC, in_label, gtype, - gate, ifindex); + gate, ifindex, false); /* Delete static LSP NHLFE */ snhlfe_del(snhlfe); diff --git a/zebra/zebra_mpls.h b/zebra/zebra_mpls.h index 9b5fb39573..07a8efeb8b 100644 --- a/zebra/zebra_mpls.h +++ b/zebra/zebra_mpls.h @@ -213,7 +213,7 @@ int zebra_mpls_lsp_uninstall(struct zebra_vrf *zvrf, struct route_node *rn, zebra_nhlfe_t *zebra_mpls_lsp_add_nhlfe(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, enum nexthop_types_t gtype, - union g_addr *gate, + const union g_addr *gate, ifindex_t ifindex, uint8_t num_labels, const mpls_label_t *out_labels); @@ -222,7 +222,7 @@ zebra_nhlfe_t *zebra_mpls_lsp_add_nhlfe(zebra_lsp_t *lsp, zebra_nhlfe_t *zebra_mpls_lsp_add_backup_nhlfe(zebra_lsp_t *lsp, enum lsp_types_t lsp_type, enum nexthop_types_t gtype, - union g_addr *gate, + const union g_addr *gate, ifindex_t ifindex, uint8_t num_labels, const mpls_label_t *out_labels); @@ -331,7 +331,8 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type, */ int mpls_lsp_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, mpls_label_t in_label, enum nexthop_types_t gtype, - const union g_addr *gate, ifindex_t ifindex); + const union g_addr *gate, ifindex_t ifindex, + bool backup_p); /* * Uninstall all NHLFEs for a particular LSP forwarding entry. @@ -339,12 +340,6 @@ int mpls_lsp_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, int mpls_lsp_uninstall_all_vrf(struct zebra_vrf *zvrf, enum lsp_types_t type, mpls_label_t in_label); -/* - * Uninstall all Segment Routing NHLFEs for a particular LSP forwarding entry. - * If no other NHLFEs exist, the entry would be deleted. - */ -void mpls_sr_lsp_uninstall_all(struct hash_bucket *bucket, void *ctxt); - #if defined(HAVE_CUMULUS) /* * Check that the label values used in LSP creation are consistent. 
The diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c index 02ba69bd4d..9bfd7aacb7 100644 --- a/zebra/zebra_nhg.c +++ b/zebra/zebra_nhg.c @@ -1734,6 +1734,10 @@ static bool nexthop_valid_resolve(const struct nexthop *nexthop, if (CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_RECURSIVE)) return false; + /* Must be ACTIVE */ + if (!CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_ACTIVE)) + return false; + switch (nexthop->type) { case NEXTHOP_TYPE_IPV4_IFINDEX: case NEXTHOP_TYPE_IPV6_IFINDEX: @@ -1917,6 +1921,13 @@ static int nexthop_active(afi_t afi, struct route_entry *re, if (nexthop->type == NEXTHOP_TYPE_IPV4 || nexthop->type == NEXTHOP_TYPE_IPV6) nexthop->ifindex = newhop->ifindex; + else if (nexthop->ifindex != newhop->ifindex) { + /* + * NEXTHOP_TYPE_*_IFINDEX but ifindex + * doesn't match what we found. + */ + return 0; + } } if (IS_ZEBRA_DEBUG_NHG_DETAIL) @@ -1926,11 +1937,23 @@ static int nexthop_active(afi_t afi, struct route_entry *re, return 1; } else if (CHECK_FLAG(re->flags, ZEBRA_FLAG_ALLOW_RECURSION)) { + struct nexthop_group *nhg; + resolved = 0; - for (ALL_NEXTHOPS(match->nhe->nhg, newhop)) { - if (!CHECK_FLAG(match->status, - ROUTE_ENTRY_INSTALLED)) - continue; + + /* Only useful if installed */ + if (!CHECK_FLAG(match->status, ROUTE_ENTRY_INSTALLED)) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: match %p (%u) not installed", + __func__, match, + match->nhe->id); + + goto done_with_match; + } + + /* Examine installed nexthops */ + nhg = &match->nhe->nhg; + for (ALL_NEXTHOPS_PTR(nhg, newhop)) { if (!nexthop_valid_resolve(nexthop, newhop)) continue; @@ -1945,25 +1968,21 @@ static int nexthop_active(afi_t afi, struct route_entry *re, resolved = 1; } - if (resolved) - re->nexthop_mtu = match->mtu; - else if (IS_ZEBRA_DEBUG_RIB_DETAILED) - zlog_debug( - " %s: Recursion failed to find", - __func__); + /* Examine installed backup nexthops, if any. There + * are only installed backups *if* there is a + * dedicated fib list. + */ + nhg = rib_get_fib_backup_nhg(match); + if (nhg == NULL || + nhg == zebra_nhg_get_backup_nhg(match->nhe)) + goto done_with_match; - return resolved; - } else if (re->type == ZEBRA_ROUTE_STATIC) { - resolved = 0; - for (ALL_NEXTHOPS(match->nhe->nhg, newhop)) { - if (!CHECK_FLAG(match->status, - ROUTE_ENTRY_INSTALLED)) - continue; + for (ALL_NEXTHOPS_PTR(nhg, newhop)) { if (!nexthop_valid_resolve(nexthop, newhop)) continue; - if (IS_ZEBRA_DEBUG_RIB_DETAILED) - zlog_debug("%s: STATIC match %p (%u), newhop %pNHv", + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: RECURSIVE match backup %p (%u), newhop %pNHv", __func__, match, match->nhe->id, newhop); @@ -1972,13 +1991,14 @@ static int nexthop_active(afi_t afi, struct route_entry *re, nexthop_set_resolved(afi, newhop, nexthop); resolved = 1; } +done_with_match: if (resolved) re->nexthop_mtu = match->mtu; - - if (!resolved && IS_ZEBRA_DEBUG_RIB_DETAILED) + else if (IS_ZEBRA_DEBUG_RIB_DETAILED) zlog_debug( - " %s: Static route unable to resolve", + " %s: Recursion failed to find", __func__); + return resolved; } else { if (IS_ZEBRA_DEBUG_RIB_DETAILED) { @@ -2170,17 +2190,20 @@ done: } /* - * Process a list of nexthops, given the head of the list, determining + * Process a list of nexthops, given an nhg, determining * whether each one is ACTIVE/installable at this time. 
*/ static uint32_t nexthop_list_active_update(struct route_node *rn, struct route_entry *re, - struct nexthop *nexthop) + struct nexthop_group *nhg) { union g_addr prev_src; unsigned int prev_active, new_active; ifindex_t prev_index; uint32_t counter = 0; + struct nexthop *nexthop; + + nexthop = nhg->nexthop; /* Process nexthops one-by-one */ for ( ; nexthop; nexthop = nexthop->next) { @@ -2263,7 +2286,7 @@ int nexthop_active_update(struct route_node *rn, struct route_entry *re) curr_nhe->id = 0; /* Process nexthops */ - curr_active = nexthop_list_active_update(rn, re, curr_nhe->nhg.nexthop); + curr_active = nexthop_list_active_update(rn, re, &curr_nhe->nhg); if (IS_ZEBRA_DEBUG_NHG_DETAIL) zlog_debug("%s: re %p curr_active %u", __func__, re, @@ -2274,7 +2297,7 @@ int nexthop_active_update(struct route_node *rn, struct route_entry *re) goto backups_done; backup_active = nexthop_list_active_update( - rn, re, zebra_nhg_get_backup_nhg(curr_nhe)->nexthop); + rn, re, zebra_nhg_get_backup_nhg(curr_nhe)); if (IS_ZEBRA_DEBUG_NHG_DETAIL) zlog_debug("%s: re %p backup_active %u", __func__, re, diff --git a/zebra/zebra_pw.c b/zebra/zebra_pw.c index 8f0c964c18..2328ab650a 100644 --- a/zebra/zebra_pw.c +++ b/zebra/zebra_pw.c @@ -550,7 +550,7 @@ static void vty_show_mpls_pseudowire_detail(struct vty *vty) re = rib_match(family2afi(pw->af), SAFI_UNICAST, pw->vrf_id, &pw->nexthop, NULL); if (re) { - for (ALL_NEXTHOPS_PTR(rib_active_nhg(re), nexthop)) { + for (ALL_NEXTHOPS_PTR(rib_get_fib_nhg(re), nexthop)) { snprintfrr(buf_nh, sizeof(buf_nh), "%pNHv", nexthop); vty_out(vty, " Next Hop: %s\n", buf_nh); @@ -604,7 +604,7 @@ static void vty_show_mpls_pseudowire(struct zebra_pw *pw, json_object *json_pws) re = rib_match(family2afi(pw->af), SAFI_UNICAST, pw->vrf_id, &pw->nexthop, NULL); if (re) { - for (ALL_NEXTHOPS_PTR(rib_active_nhg(re), nexthop)) { + for (ALL_NEXTHOPS_PTR(rib_get_fib_nhg(re), nexthop)) { json_nexthop = json_object_new_object(); snprintfrr(buf_nh, sizeof(buf_nh), "%pNHv", nexthop); json_object_string_add(json_nexthop, "nexthop", buf_nh); diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c index 31582dcb3d..67b3812ed3 100644 --- a/zebra/zebra_rib.c +++ b/zebra/zebra_rib.c @@ -1342,6 +1342,92 @@ static bool rib_compare_routes(const struct route_entry *re1, } /* + * Compare nexthop lists from a route and a dplane context; test whether + * the list installed in the FIB matches the route's list. + * Set 'changed_p' to 'true' if there were changes to the route's + * installed nexthops. + * + * Return 'false' if any ACTIVE route nexthops are not mentioned in the FIB + * list. + */ +static bool rib_update_nhg_from_ctx(struct nexthop_group *re_nhg, + const struct nexthop_group *ctx_nhg, + bool *changed_p) +{ + bool matched_p = true; + struct nexthop *nexthop, *ctx_nexthop; + + /* Get the first `installed` one to check against. + * If the dataplane doesn't set these to be what was actually installed, + * it will just be whatever was in re->nhe->nhg? 
+ */ + ctx_nexthop = ctx_nhg->nexthop; + + if (CHECK_FLAG(ctx_nexthop->flags, NEXTHOP_FLAG_RECURSIVE) + || !CHECK_FLAG(ctx_nexthop->flags, NEXTHOP_FLAG_ACTIVE)) + ctx_nexthop = nexthop_next_active_resolved(ctx_nexthop); + + for (ALL_NEXTHOPS_PTR(re_nhg, nexthop)) { + + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) + continue; + + if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) + continue; + + /* Check for a FIB nexthop corresponding to the RIB nexthop */ + if (nexthop_same(ctx_nexthop, nexthop) == false) { + /* If the FIB doesn't know about the nexthop, + * it's not installed + */ + if (IS_ZEBRA_DEBUG_RIB_DETAILED || + IS_ZEBRA_DEBUG_NHG_DETAIL) { + zlog_debug("%s: no ctx match for rib nh %pNHv %s", + __func__, nexthop, + (CHECK_FLAG(nexthop->flags, + NEXTHOP_FLAG_FIB) ? + "(FIB)":"")); + } + matched_p = false; + + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)) + *changed_p = true; + + UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB); + + /* Keep checking nexthops */ + continue; + } + + if (CHECK_FLAG(ctx_nexthop->flags, NEXTHOP_FLAG_FIB)) { + if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: rib nh %pNHv -> installed", + __func__, nexthop); + + *changed_p = true; + } + + SET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB); + } else { + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: rib nh %pNHv -> uninstalled", + __func__, nexthop); + + *changed_p = true; + } + + UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB); + } + + ctx_nexthop = nexthop_next_active_resolved(ctx_nexthop); + } + + return matched_p; +} + +/* * Update a route from a dplane context. This consolidates common code * that can be used in processing of results from FIB updates, and in * async notification processing. @@ -1352,10 +1438,10 @@ static bool rib_update_re_from_ctx(struct route_entry *re, struct zebra_dplane_ctx *ctx) { char dest_str[PREFIX_STRLEN] = ""; - char nh_str[NEXTHOP_STRLEN]; - struct nexthop *nexthop, *ctx_nexthop; + struct nexthop *nexthop; bool matched; const struct nexthop_group *ctxnhg; + struct nexthop_group *re_nhg; bool is_selected = false; /* Is 're' currently the selected re? */ bool changed_p = false; /* Change to nexthops? */ rib_dest_t *dest; @@ -1386,10 +1472,13 @@ static bool rib_update_re_from_ctx(struct route_entry *re, matched = false; ctxnhg = dplane_ctx_get_ng(ctx); - /* Check both fib group and notif group for equivalence. + /* Check route's fib group and incoming notif group for equivalence. * * Let's assume the nexthops are ordered here to save time. */ + /* TODO -- this isn't testing or comparing the FIB flags; we should + * do a more explicit loop, checking the incoming notification's flags. + */ if (re->fib_ng.nexthop && ctxnhg->nexthop && nexthop_group_equal(&re->fib_ng, ctxnhg)) matched = true; @@ -1400,7 +1489,7 @@ static bool rib_update_re_from_ctx(struct route_entry *re, zlog_debug( "%s(%u):%s update_from_ctx(): existing fib nhg, no change", VRF_LOGNAME(vrf), re->vrf_id, dest_str); - goto done; + goto check_backups; } else if (re->fib_ng.nexthop) { /* @@ -1430,70 +1519,16 @@ static bool rib_update_re_from_ctx(struct route_entry *re, * * Assume nexthops are ordered here as well. */ - matched = true; - ctx_nexthop = ctxnhg->nexthop; - - /* Nothing installed - we can skip some of the checking/comparison + /* If nothing is installed, we can skip some of the checking/comparison * of nexthops. 
*/ - if (ctx_nexthop == NULL) { + if (ctxnhg->nexthop == NULL) { changed_p = true; goto no_nexthops; } - /* Get the first `installed` one to check against. - * If the dataplane doesn't set these to be what was actually installed, - * it will just be whatever was in re->nhe->nhg? - */ - if (CHECK_FLAG(ctx_nexthop->flags, NEXTHOP_FLAG_RECURSIVE) - || !CHECK_FLAG(ctx_nexthop->flags, NEXTHOP_FLAG_ACTIVE)) - ctx_nexthop = nexthop_next_active_resolved(ctx_nexthop); - - for (ALL_NEXTHOPS(re->nhe->nhg, nexthop)) { - - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) - continue; - - if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) - continue; - - /* Check for a FIB nexthop corresponding to the RIB nexthop */ - if (nexthop_same(ctx_nexthop, nexthop) == false) { - /* If the FIB doesn't know about the nexthop, - * it's not installed - */ - if (IS_ZEBRA_DEBUG_RIB_DETAILED) { - nexthop2str(nexthop, nh_str, sizeof(nh_str)); - zlog_debug( - "update_from_ctx: no match for rib nh %s", - nh_str); - } - matched = false; - - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)) - changed_p = true; - - UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB); - - /* Keep checking nexthops */ - continue; - } - - if (CHECK_FLAG(ctx_nexthop->flags, NEXTHOP_FLAG_FIB)) { - if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)) - changed_p = true; - - SET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB); - } else { - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)) - changed_p = true; - - UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB); - } - - ctx_nexthop = nexthop_next_active_resolved(ctx_nexthop); - } + matched = rib_update_nhg_from_ctx(&(re->nhe->nhg), ctxnhg, &changed_p); /* If all nexthops were processed, we're done */ if (matched) { @@ -1502,7 +1537,7 @@ static bool rib_update_re_from_ctx(struct route_entry *re, "%s(%u):%s update_from_ctx(): rib nhg matched, changed '%s'", VRF_LOGNAME(vrf), re->vrf_id, dest_str, (changed_p ? "true" : "false")); - goto done; + goto check_backups; } no_nexthops: @@ -1527,7 +1562,81 @@ no_nexthops: _nexthop_add(&(re->fib_ng.nexthop), nexthop); } +check_backups: + + /* + * Check the status of the route's backup nexthops, if any. + * The logic for backups is somewhat different: if any backup is + * installed, a new fib nhg will be attached to the route. + */ + re_nhg = zebra_nhg_get_backup_nhg(re->nhe); + if (re_nhg == NULL) + goto done; /* No backup nexthops */ + + /* First check the route's 'fib' list of backups, if it's present + * from some previous event. + */ + re_nhg = &re->fib_backup_ng; + ctxnhg = dplane_ctx_get_backup_ng(ctx); + + matched = false; + if (re_nhg->nexthop && ctxnhg && nexthop_group_equal(re_nhg, ctxnhg)) + matched = true; + + /* If the new FIB set matches an existing FIB set, we're done. */ + if (matched) { + if (IS_ZEBRA_DEBUG_RIB) + zlog_debug( + "%s(%u):%s update_from_ctx(): existing fib backup nhg, no change", + VRF_LOGNAME(vrf), re->vrf_id, dest_str); + goto done; + + } else if (re->fib_backup_ng.nexthop) { + /* + * Free stale fib backup list and move on to check + * the route's backups. 
+ */ + if (IS_ZEBRA_DEBUG_RIB) + zlog_debug( + "%s(%u):%s update_from_ctx(): replacing fib backup nhg", + VRF_LOGNAME(vrf), re->vrf_id, dest_str); + nexthops_free(re->fib_backup_ng.nexthop); + re->fib_backup_ng.nexthop = NULL; + + /* Note that the installed nexthops have changed */ + changed_p = true; + } else { + if (IS_ZEBRA_DEBUG_RIB) + zlog_debug("%s(%u):%s update_from_ctx(): no fib backup nhg", + VRF_LOGNAME(vrf), re->vrf_id, dest_str); + } + + /* + * If a FIB backup nexthop set exists: attach a copy + * to the route if any backup is installed + */ + if (ctxnhg && ctxnhg->nexthop) { + + for (ALL_NEXTHOPS_PTR(ctxnhg, nexthop)) { + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)) + break; + } + + /* If no installed backups, we're done */ + if (nexthop == NULL) + goto done; + + if (IS_ZEBRA_DEBUG_RIB) + zlog_debug("%s(%u):%s update_from_ctx(): changed %s, adding new backup fib nhg", + VRF_LOGNAME(vrf), re->vrf_id, dest_str, + (changed_p ? "true" : "false")); + + copy_nexthops(&(re->fib_backup_ng.nexthop), ctxnhg->nexthop, + NULL); + } + done: + return changed_p; } @@ -1814,6 +1923,38 @@ done: } /* + * Count installed/FIB nexthops + */ +static int rib_count_installed_nh(struct route_entry *re) +{ + int count = 0; + struct nexthop *nexthop; + struct nexthop_group *nhg; + + nhg = rib_get_fib_nhg(re); + + for (ALL_NEXTHOPS_PTR(nhg, nexthop)) { + /* The meaningful flag depends on where the installed + * nexthops reside. + */ + if (nhg == &(re->fib_backup_ng)) { + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)) + count++; + } else { + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) + count++; + } + } + + for (ALL_NEXTHOPS_PTR(rib_get_fib_backup_nhg(re), nexthop)) { + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)) + count++; + } + + return count; +} + +/* * Handle notification from async dataplane: the dataplane has detected * some change to a route, and notifies zebra so that the control plane * can reflect that change. @@ -1930,12 +2071,8 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) */ start_count = 0; - if (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED)) { - for (ALL_NEXTHOPS_PTR(rib_active_nhg(re), nexthop)) { - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)) - start_count++; - } - } + if (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED)) + start_count = rib_count_installed_nh(re); /* Update zebra's nexthop FIB flags based on the context struct's * nexthops. @@ -1954,12 +2091,7 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) * Perform follow-up work if the actual status of the prefix * changed. */ - - end_count = 0; - for (ALL_NEXTHOPS_PTR(rib_active_nhg(re), nexthop)) { - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)) - end_count++; - } + end_count = rib_count_installed_nh(re); /* Various fib transitions: changed nexthops; from installed to * not-installed; or not-installed to installed. 
@@ -1988,7 +2120,7 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) SET_FLAG(re->status, ROUTE_ENTRY_INSTALLED); /* Changed nexthops - update kernel/others */ - dplane_route_notif_update(rn, re, DPLANE_OP_ROUTE_INSTALL, ctx); + dplane_route_notif_update(rn, re, DPLANE_OP_ROUTE_UPDATE, ctx); /* Redistribute, lsp, and nht update */ redistribute_update(dest_pfx, src_pfx, re, NULL); diff --git a/zebra/zebra_rnh.c b/zebra/zebra_rnh.c index 20af96a557..d1a5cf2a9d 100644 --- a/zebra/zebra_rnh.c +++ b/zebra/zebra_rnh.c @@ -419,7 +419,7 @@ static int zebra_rnh_apply_nht_rmap(afi_t afi, struct zebra_vrf *zvrf, at_least_one++; /* at least one valid NH */ else { SET_FLAG(nexthop->flags, - NEXTHOP_FLAG_RNH_FILTERED); + NEXTHOP_FLAG_RNH_FILTERED); } } } @@ -458,12 +458,12 @@ zebra_rnh_resolve_import_entry(struct zebra_vrf *zvrf, afi_t afi, if (IS_ZEBRA_DEBUG_NHT_DETAILED) { char buf[PREFIX_STRLEN]; - char buf1[PREFIX_STRLEN]; + char buf1[SRCDEST2STR_BUFFER]; zlog_debug("%s: %u:%s Resolved Import Entry to %s", __func__, rnh->vrf_id, prefix2str(&rnh->node->p, buf, sizeof(buf)), - srcdest_rnode2str(rn, buf1, sizeof(buf))); + srcdest_rnode2str(rn, buf1, sizeof(buf1))); } /* Identify appropriate route entry. */ @@ -974,12 +974,131 @@ static void copy_state(struct rnh *rnh, const struct route_entry *re, state->vrf_id = re->vrf_id; state->status = re->status; - state->nhe = zebra_nhg_alloc(); + state->nhe = zebra_nhe_copy(re->nhe, 0); + + /* Copy the 'fib' nexthops also, if present - we want to capture + * the true installed nexthops. + */ + if (re->fib_ng.nexthop) + nexthop_group_copy(&state->fib_ng, &re->fib_ng); + if (re->fib_backup_ng.nexthop) + nexthop_group_copy(&state->fib_backup_ng, &re->fib_backup_ng); - nexthop_group_copy(&(state->nhe->nhg), &(re->nhe->nhg)); rnh->state = state; } +/* + * Compare two route_entries' nexthops. + */ +static bool compare_valid_nexthops(struct route_entry *r1, + struct route_entry *r2) +{ + bool matched_p = false; + struct nexthop_group *nhg1, *nhg2; + struct nexthop *nh1, *nh2; + + /* Account for backup nexthops and for the 'fib' nexthop lists, + * if present. + */ + nhg1 = rib_get_fib_nhg(r1); + nhg2 = rib_get_fib_nhg(r2); + + nh1 = nhg1->nexthop; + nh2 = nhg2->nexthop; + + while (1) { + /* Find each list's next valid nexthop */ + while ((nh1 != NULL) && !rnh_nexthop_valid(r1, nh1)) + nh1 = nexthop_next(nh1); + + while ((nh2 != NULL) && !rnh_nexthop_valid(r2, nh2)) + nh2 = nexthop_next(nh2); + + if (nh1 && nh2) { + /* Any difference is a no-match */ + if (nexthop_cmp(nh1, nh2) != 0) { + if (IS_ZEBRA_DEBUG_NHT_DETAILED) + zlog_debug("%s: nh1, nh2 differ", + __func__); + goto done; + } + + nh1 = nexthop_next(nh1); + nh2 = nexthop_next(nh2); + } else if (nh1 || nh2) { + /* One list has more valid nexthops than the other */ + if (IS_ZEBRA_DEBUG_NHT_DETAILED) + zlog_debug("%s: nh1 %s, nh2 %s", __func__, + nh1 ? "non-NULL" : "NULL", + nh2 ? "non-NULL" : "NULL"); + goto done; + } else + break; /* Done with both lists */ + } + + /* The test for the backups is slightly different: the only installed + * backups will be in the 'fib' list. 
+ */ + nhg1 = rib_get_fib_backup_nhg(r1); + if (nhg1 == zebra_nhg_get_backup_nhg(r1->nhe)) + nhg1 = NULL; + + nhg2 = rib_get_fib_backup_nhg(r2); + if (nhg2 == zebra_nhg_get_backup_nhg(r2->nhe)) + nhg2 = NULL; + + if (nhg1) + nh1 = nhg1->nexthop; + else + nh1 = NULL; + + if (nhg2) + nh2 = nhg2->nexthop; + else + nh2 = NULL; + + while (1) { + /* Find each backup list's next valid nexthop */ + while ((nh1 != NULL) && !rnh_nexthop_valid(r1, nh1)) + nh1 = nexthop_next(nh1); + + while ((nh2 != NULL) && !rnh_nexthop_valid(r2, nh2)) + nh2 = nexthop_next(nh2); + + if (nh1 && nh2) { + /* Any difference is a no-match */ + if (nexthop_cmp(nh1, nh2) != 0) { + if (IS_ZEBRA_DEBUG_NHT_DETAILED) + zlog_debug("%s: backup nh1, nh2 differ", + __func__); + goto done; + } + + nh1 = nexthop_next(nh1); + nh2 = nexthop_next(nh2); + } else if (nh1 || nh2) { + /* One list has more valid nexthops than the other */ + if (IS_ZEBRA_DEBUG_NHT_DETAILED) + zlog_debug("%s: backup nh1 %s, nh2 %s", + __func__, + nh1 ? "non-NULL" : "NULL", + nh2 ? "non-NULL" : "NULL"); + goto done; + } else + break; /* Done with both lists */ + } + + /* Well, it's a match */ + if (IS_ZEBRA_DEBUG_NHT_DETAILED) + zlog_debug("%s: matched", __func__); + + matched_p = true; + +done: + + return matched_p; +} + static int compare_state(struct route_entry *r1, struct route_entry *r2) { if (!r1 && !r2) @@ -994,12 +1113,7 @@ static int compare_state(struct route_entry *r1, struct route_entry *r2) if (r1->metric != r2->metric) return 1; - if (nexthop_group_nexthop_num(&(r1->nhe->nhg)) - != nexthop_group_nexthop_num(&(r2->nhe->nhg))) - return 1; - - if (nexthop_group_hash(&(r1->nhe->nhg)) != - nexthop_group_hash(&(r2->nhe->nhg))) + if (!compare_valid_nexthops(r1, r2)) return 1; return 0; @@ -1044,6 +1158,7 @@ static int send_client(struct rnh *rnh, struct zserv *client, } if (re) { struct zapi_nexthop znh; + struct nexthop_group *nhg; stream_putc(s, re->type); stream_putw(s, re->instance); @@ -1052,7 +1167,9 @@ static int send_client(struct rnh *rnh, struct zserv *client, num = 0; nump = stream_get_endp(s); stream_putc(s, 0); - for (ALL_NEXTHOPS(re->nhe->nhg, nh)) + + nhg = rib_get_fib_nhg(re); + for (ALL_NEXTHOPS_PTR(nhg, nh)) if (rnh_nexthop_valid(re, nh)) { zapi_nexthop_from_nexthop(&znh, nh); ret = zapi_nexthop_encode(s, &znh, 0/*flags*/); @@ -1061,6 +1178,21 @@ static int send_client(struct rnh *rnh, struct zserv *client, num++; } + + nhg = rib_get_fib_backup_nhg(re); + if (nhg == zebra_nhg_get_backup_nhg(re->nhe)) + nhg = NULL; + + if (nhg) { + for (ALL_NEXTHOPS_PTR(nhg, nh)) + if (rnh_nexthop_valid(re, nh)) { + zapi_nexthop_from_nexthop(&znh, nh); + zapi_nexthop_encode(s, &znh, + 0 /* flags */); + num++; + } + } + stream_putc_at(s, nump, num); } else { stream_putc(s, 0); // type diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c index 9718b40d9d..1da2660509 100644 --- a/zebra/zebra_vty.c +++ b/zebra/zebra_vty.c @@ -516,8 +516,189 @@ static void show_route_nexthop_helper(struct vty *vty, sizeof(buf), 1)); } - if ((re == NULL) && nexthop->weight) + if (nexthop->weight) vty_out(vty, ", weight %u", nexthop->weight); + + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) + vty_out(vty, ", backup %d", nexthop->backup_idx); +} + +/* + * Render a nexthop into a json object; the caller allocates and owns + * the json object memory. 
+ */ +static void show_nexthop_json_helper(json_object *json_nexthop, + const struct nexthop *nexthop, + const struct route_entry *re) +{ + char buf[SRCDEST2STR_BUFFER]; + struct vrf *vrf = NULL; + json_object *json_labels = NULL; + + json_object_int_add(json_nexthop, "flags", + nexthop->flags); + + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE)) + json_object_boolean_true_add(json_nexthop, + "duplicate"); + + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)) + json_object_boolean_true_add(json_nexthop, + "fib"); + + switch (nexthop->type) { + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV4_IFINDEX: + json_object_string_add( + json_nexthop, "ip", + inet_ntoa(nexthop->gate.ipv4)); + json_object_string_add(json_nexthop, "afi", + "ipv4"); + + if (nexthop->ifindex) { + json_object_int_add(json_nexthop, + "interfaceIndex", + nexthop->ifindex); + json_object_string_add( + json_nexthop, "interfaceName", + ifindex2ifname( + nexthop->ifindex, + nexthop->vrf_id)); + } + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + json_object_string_add( + json_nexthop, "ip", + inet_ntop(AF_INET6, &nexthop->gate.ipv6, + buf, sizeof(buf))); + json_object_string_add(json_nexthop, "afi", + "ipv6"); + + if (nexthop->ifindex) { + json_object_int_add(json_nexthop, + "interfaceIndex", + nexthop->ifindex); + json_object_string_add( + json_nexthop, "interfaceName", + ifindex2ifname( + nexthop->ifindex, + nexthop->vrf_id)); + } + break; + + case NEXTHOP_TYPE_IFINDEX: + json_object_boolean_true_add( + json_nexthop, "directlyConnected"); + json_object_int_add(json_nexthop, + "interfaceIndex", + nexthop->ifindex); + json_object_string_add( + json_nexthop, "interfaceName", + ifindex2ifname(nexthop->ifindex, + nexthop->vrf_id)); + break; + case NEXTHOP_TYPE_BLACKHOLE: + json_object_boolean_true_add(json_nexthop, + "unreachable"); + switch (nexthop->bh_type) { + case BLACKHOLE_REJECT: + json_object_boolean_true_add( + json_nexthop, "reject"); + break; + case BLACKHOLE_ADMINPROHIB: + json_object_boolean_true_add( + json_nexthop, + "admin-prohibited"); + break; + case BLACKHOLE_NULL: + json_object_boolean_true_add( + json_nexthop, "blackhole"); + break; + case BLACKHOLE_UNSPEC: + break; + } + break; + default: + break; + } + + if ((nexthop->vrf_id != re->vrf_id) + && (nexthop->type != NEXTHOP_TYPE_BLACKHOLE)) { + vrf = vrf_lookup_by_id(nexthop->vrf_id); + json_object_string_add(json_nexthop, "vrf", + vrf->name); + } + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE)) + json_object_boolean_true_add(json_nexthop, + "duplicate"); + + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) + json_object_boolean_true_add(json_nexthop, + "active"); + + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) + json_object_boolean_true_add(json_nexthop, + "onLink"); + + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) + json_object_boolean_true_add(json_nexthop, + "recursive"); + + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) + json_object_int_add(json_nexthop, "backupIndex", + nexthop->backup_idx); + + switch (nexthop->type) { + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV4_IFINDEX: + if (nexthop->src.ipv4.s_addr) { + if (inet_ntop(AF_INET, + &nexthop->src.ipv4, buf, + sizeof(buf))) + json_object_string_add( + json_nexthop, "source", + buf); + } + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, + &in6addr_any)) { + if (inet_ntop(AF_INET6, + &nexthop->src.ipv6, buf, + sizeof(buf))) + json_object_string_add( + json_nexthop, 
"source", + buf); + } + break; + default: + break; + } + + if (nexthop->nh_label + && nexthop->nh_label->num_labels) { + json_labels = json_object_new_array(); + + for (int label_index = 0; + label_index + < nexthop->nh_label->num_labels; + label_index++) + json_object_array_add( + json_labels, + json_object_new_int( + nexthop->nh_label->label + [label_index])); + + json_object_object_add(json_nexthop, "labels", + json_labels); + } + + if (nexthop->weight) + json_object_int_add(json_nexthop, "weight", + nexthop->weight); + } static void vty_show_ip_route(struct vty *vty, struct route_node *rn, @@ -530,12 +711,12 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn, json_object *json_nexthops = NULL; json_object *json_nexthop = NULL; json_object *json_route = NULL; - json_object *json_labels = NULL; time_t uptime; struct vrf *vrf = NULL; rib_dest_t *dest = rib_dest_from_rnode(rn); struct nexthop_group *nhg; char up_str[MONOTIME_STRLEN]; + bool first_p; uptime = monotime(NULL); uptime -= re->uptime; @@ -546,7 +727,7 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn, * nexthops. */ if (is_fib) - nhg = rib_active_nhg(re); + nhg = rib_get_fib_nhg(re); else nhg = &(re->nhe->nhg); @@ -611,177 +792,44 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn, for (ALL_NEXTHOPS_PTR(nhg, nexthop)) { json_nexthop = json_object_new_object(); - json_object_int_add(json_nexthop, "flags", - nexthop->flags); + show_nexthop_json_helper(json_nexthop, nexthop, re); + json_object_array_add(json_nexthops, json_nexthop); + } - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE)) - json_object_boolean_true_add(json_nexthop, - "duplicate"); + json_object_object_add(json_route, "nexthops", json_nexthops); - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)) - json_object_boolean_true_add(json_nexthop, - "fib"); + /* If there are backup nexthops, include them */ + if (is_fib) + nhg = rib_get_fib_backup_nhg(re); + else + nhg = zebra_nhg_get_backup_nhg(re->nhe); - switch (nexthop->type) { - case NEXTHOP_TYPE_IPV4: - case NEXTHOP_TYPE_IPV4_IFINDEX: - json_object_string_add( - json_nexthop, "ip", - inet_ntoa(nexthop->gate.ipv4)); - json_object_string_add(json_nexthop, "afi", - "ipv4"); - - if (nexthop->ifindex) { - json_object_int_add(json_nexthop, - "interfaceIndex", - nexthop->ifindex); - json_object_string_add( - json_nexthop, "interfaceName", - ifindex2ifname( - nexthop->ifindex, - nexthop->vrf_id)); - } - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - json_object_string_add( - json_nexthop, "ip", - inet_ntop(AF_INET6, &nexthop->gate.ipv6, - buf, sizeof(buf))); - json_object_string_add(json_nexthop, "afi", - "ipv6"); - - if (nexthop->ifindex) { - json_object_int_add(json_nexthop, - "interfaceIndex", - nexthop->ifindex); - json_object_string_add( - json_nexthop, "interfaceName", - ifindex2ifname( - nexthop->ifindex, - nexthop->vrf_id)); - } - break; - - case NEXTHOP_TYPE_IFINDEX: - json_object_boolean_true_add( - json_nexthop, "directlyConnected"); - json_object_int_add(json_nexthop, - "interfaceIndex", - nexthop->ifindex); - json_object_string_add( - json_nexthop, "interfaceName", - ifindex2ifname(nexthop->ifindex, - nexthop->vrf_id)); - break; - case NEXTHOP_TYPE_BLACKHOLE: - json_object_boolean_true_add(json_nexthop, - "unreachable"); - switch (nexthop->bh_type) { - case BLACKHOLE_REJECT: - json_object_boolean_true_add( - json_nexthop, "reject"); - break; - case BLACKHOLE_ADMINPROHIB: - json_object_boolean_true_add( - json_nexthop, - 
"admin-prohibited"); - break; - case BLACKHOLE_NULL: - json_object_boolean_true_add( - json_nexthop, "blackhole"); - break; - case BLACKHOLE_UNSPEC: - break; - } - break; - default: - break; - } + if (nhg) { + json_nexthops = json_object_new_array(); - if ((nexthop->vrf_id != re->vrf_id) - && (nexthop->type != NEXTHOP_TYPE_BLACKHOLE)) { - vrf = vrf_lookup_by_id(nexthop->vrf_id); - json_object_string_add(json_nexthop, "vrf", - vrf->name); - } - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE)) - json_object_boolean_true_add(json_nexthop, - "duplicate"); - - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) - json_object_boolean_true_add(json_nexthop, - "active"); - - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) - json_object_boolean_true_add(json_nexthop, - "onLink"); - - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) - json_object_boolean_true_add(json_nexthop, - "recursive"); - - switch (nexthop->type) { - case NEXTHOP_TYPE_IPV4: - case NEXTHOP_TYPE_IPV4_IFINDEX: - if (nexthop->src.ipv4.s_addr) { - if (inet_ntop(AF_INET, - &nexthop->src.ipv4, buf, - sizeof(buf))) - json_object_string_add( - json_nexthop, "source", - buf); - } - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, - &in6addr_any)) { - if (inet_ntop(AF_INET6, - &nexthop->src.ipv6, buf, - sizeof(buf))) - json_object_string_add( - json_nexthop, "source", - buf); - } - break; - default: - break; - } + for (ALL_NEXTHOPS_PTR(nhg, nexthop)) { + json_nexthop = json_object_new_object(); - if (nexthop->nh_label - && nexthop->nh_label->num_labels) { - json_labels = json_object_new_array(); - - for (int label_index = 0; - label_index - < nexthop->nh_label->num_labels; - label_index++) - json_object_array_add( - json_labels, - json_object_new_int( - nexthop->nh_label->label - [label_index])); - - json_object_object_add(json_nexthop, "labels", - json_labels); + show_nexthop_json_helper(json_nexthop, + nexthop, re); + json_object_array_add(json_nexthops, + json_nexthop); } - if (nexthop->weight) - json_object_int_add(json_nexthop, "weight", - nexthop->weight); - - json_object_array_add(json_nexthops, json_nexthop); + json_object_object_add(json_route, "backupNexthops", + json_nexthops); } - json_object_object_add(json_route, "nexthops", json_nexthops); json_object_array_add(json, json_route); return; } /* Nexthop information. */ + first_p = true; for (ALL_NEXTHOPS_PTR(nhg, nexthop)) { - if (nexthop == nhg->nexthop) { + if (first_p) { + first_p = false; + /* Prefix information. 
*/ len = vty_out(vty, "%c", zebra_route_char(re->type)); if (re->instance) @@ -808,40 +856,36 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn, show_route_nexthop_helper(vty, re, nexthop); - if (nexthop->weight) - vty_out(vty, ", weight %u", nexthop->weight); - vty_out(vty, ", %s\n", up_str); + } - /* Check for backup info */ - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) { - struct nexthop *backup; - int i; + /* Check for backup info if present */ + if (is_fib) + nhg = rib_get_fib_backup_nhg(re); + else + nhg = zebra_nhg_get_backup_nhg(re->nhe); - if (re->nhe->backup_info == NULL || - re->nhe->backup_info->nhe == NULL) - continue; + if (nhg == NULL) + return; - i = 0; - for (ALL_NEXTHOPS(re->nhe->backup_info->nhe->nhg, - backup)) { - if (i == nexthop->backup_idx) - break; - i++; - } + /* Print backup info */ + for (ALL_NEXTHOPS_PTR(nhg, nexthop)) { + bool star_p = false; - /* Print useful backup info */ - if (backup) { - /* TODO -- install state is not accurate */ - vty_out(vty, " %*c [backup %d]", - /*re_status_output_char(re, backup),*/ - len - 3 + (2 * nexthop_level(nexthop)), - ' ', nexthop->backup_idx); - show_route_nexthop_helper(vty, re, backup); - vty_out(vty, "\n"); - } - } + if (is_fib) + star_p = CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB); + + /* TODO -- it'd be nice to be able to include + * the entire list of backups, *and* include the + * real installation state. + */ + vty_out(vty, " b%c %*c", + (star_p ? '*' : ' '), + len - 3 + (2 * nexthop_level(nexthop)), ' '); + show_route_nexthop_helper(vty, re, nexthop); + vty_out(vty, "\n"); } + } static void vty_show_ip_route_detail_json(struct vty *vty, |

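Note on the zebra_rib.c hunks above: the per-nexthop FIB-flag reconciliation is factored out into the new rib_update_nhg_from_ctx() helper. The following is a minimal, self-contained sketch of that idea only, not the FRR code: "struct nh" and NH_FLAG_FIB are simplified stand-ins for struct nexthop and NEXTHOP_FLAG_FIB, the gate-integer match stands in for nexthop_same(), and the ACTIVE/RECURSIVE filtering done by the real helper is omitted. It walks the route's nexthops against the list the dataplane reports as installed, syncs the installed flag, and reports whether anything changed.

/*
 * Illustrative sketch only -- NOT the FRR implementation.
 * Reconcile a route's per-nexthop "installed" flag against the list
 * the dataplane says it actually installed.
 */
#include <stdbool.h>
#include <stdio.h>

#define NH_FLAG_FIB 0x1 /* stand-in for NEXTHOP_FLAG_FIB */

struct nh {                 /* simplified stand-in for struct nexthop */
	struct nh *next;
	int gate;           /* pretend gateway identifier */
	unsigned int flags;
};

/* Return true if every route nexthop was found in the ctx list; set
 * *changed_p when any installed-state flag had to be updated. */
static bool sync_fib_flags(struct nh *re_nh, const struct nh *ctx_nh,
			   bool *changed_p)
{
	bool matched = true;

	for (; re_nh; re_nh = re_nh->next) {
		if (ctx_nh == NULL || ctx_nh->gate != re_nh->gate) {
			/* Dataplane doesn't know this nexthop: not installed */
			matched = false;
			if (re_nh->flags & NH_FLAG_FIB)
				*changed_p = true;
			re_nh->flags &= ~NH_FLAG_FIB;
			continue;
		}

		/* Copy the installed state reported by the dataplane */
		if ((ctx_nh->flags ^ re_nh->flags) & NH_FLAG_FIB)
			*changed_p = true;
		re_nh->flags = (re_nh->flags & ~NH_FLAG_FIB)
			       | (ctx_nh->flags & NH_FLAG_FIB);

		ctx_nh = ctx_nh->next;
	}

	return matched;
}

int main(void)
{
	/* Route knows two nexthops; the dataplane reports only the
	 * first one as installed. */
	struct nh b = { NULL, 20, NH_FLAG_FIB };
	struct nh a = { &b, 10, 0 };
	struct nh ctx_a = { NULL, 10, NH_FLAG_FIB };
	bool changed = false;
	bool all = sync_fib_flags(&a, &ctx_a, &changed);

	printf("matched=%d changed=%d a.fib=%d b.fib=%d\n",
	       all, changed, !!(a.flags & NH_FLAG_FIB),
	       !!(b.flags & NH_FLAG_FIB));
	return 0;
}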
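Related to that, the new check_backups path in rib_update_re_from_ctx() attaches a dedicated "fib" backup list to a route only when the notification shows at least one backup nexthop as actually installed. A rough sketch of that decision follows, under the same simplified types as above; copy_list() is a hypothetical stand-in for zebra's copy_nexthops().

/*
 * Illustrative sketch only -- NOT the zebra code.  Attach a copy of the
 * dataplane-reported backup nexthops only if one of them is installed.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NH_FLAG_FIB 0x1

struct nh {
	struct nh *next;
	int gate;
	unsigned int flags;
};

/* Deep-copy a list (simplified stand-in for copy_nexthops()). */
static struct nh *copy_list(const struct nh *src)
{
	struct nh *head = NULL, **tail = &head;

	for (; src; src = src->next) {
		*tail = malloc(sizeof(**tail));
		**tail = *src;
		(*tail)->next = NULL;
		tail = &(*tail)->next;
	}
	return head;
}

/* Return a copy of the ctx backups only when one of them is installed;
 * otherwise the route keeps no fib backup list at all. */
static struct nh *maybe_attach_fib_backups(const struct nh *ctx_backups)
{
	const struct nh *nh;

	for (nh = ctx_backups; nh; nh = nh->next)
		if (nh->flags & NH_FLAG_FIB)
			return copy_list(ctx_backups);

	return NULL;
}

int main(void)
{
	struct nh b2 = { NULL, 21, NH_FLAG_FIB };
	struct nh b1 = { &b2, 20, 0 };
	struct nh *fib_backups = maybe_attach_fib_backups(&b1);

	printf("attached=%s\n", fib_backups ? "yes" : "no"); /* yes */

	while (fib_backups) {
		struct nh *next = fib_backups->next;

		free(fib_backups);
		fib_backups = next;
	}
	return 0;
}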
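Finally, the zebra_rnh.c change replaces the old nexthop-count/hash comparison in compare_state() with an element-by-element comparison of the two route entries' valid nexthops. The sketch below shows only the lockstep-walk pattern, assuming a hypothetical miniature nexthop type whose boolean "valid" field stands in for rnh_nexthop_valid() and whose gate integer stands in for nexthop_cmp(); the real function additionally repeats the walk over the installed backup lists.

/*
 * Illustrative sketch only -- NOT the zebra code.  Two lists match when
 * their valid entries agree pairwise and both lists run out together.
 */
#include <stdbool.h>
#include <stdio.h>

struct nh {            /* hypothetical miniature nexthop */
	struct nh *next;
	int gate;
	bool valid;    /* stand-in for rnh_nexthop_valid() */
};

/* Skip ahead to the next valid entry, if any. */
static const struct nh *next_valid(const struct nh *nh)
{
	while (nh && !nh->valid)
		nh = nh->next;
	return nh;
}

static bool lists_match(const struct nh *a, const struct nh *b)
{
	while (1) {
		a = next_valid(a);
		b = next_valid(b);

		if (a && b) {
			if (a->gate != b->gate)
				return false; /* any difference: no match */
			a = a->next;
			b = b->next;
		} else if (a || b) {
			return false; /* one list has more valid entries */
		} else {
			return true;  /* both lists exhausted together */
		}
	}
}

int main(void)
{
	struct nh a2 = { NULL, 20, true };
	struct nh a1 = { &a2, 10, true };

	struct nh b3 = { NULL, 20, true };
	struct nh b2 = { &b3, 99, false };  /* invalid entry is skipped */
	struct nh b1 = { &b2, 10, true };

	printf("match=%d\n", lists_match(&a1, &b1)); /* prints match=1 */
	return 0;
}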