32 files changed, 1534 insertions, 580 deletions
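A note on the first hunk below (bgpd/bgp_clist.c): standard community-lists used to treat an entry containing the well-known internet community as a match-everything shortcut, so a "permit internet" entry at a low sequence number short-circuited any later deny entries. With that shortcut removed, internet is compared like any other community value and evaluation stays strictly first-match-wins in sequence order, which is exactly what the new bgp_clist topotest checks: a route tagged 4:1 must hit the seq 10 deny even though seq 5 permits internet. The following is a minimal standalone model of that first-match semantics; the types, the COM() encoding, and the one-community-per-entry restriction are illustrative simplifications, not FRR's actual community_list structures:

/* Simplified model of standard community-list evaluation after the
 * bgp_clist.c change: scan entries in sequence order; the first entry
 * whose community the route actually carries decides the result.
 * Illustrative stand-in types only, not FRR's community_list/entry. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define COM(as, val) (((uint32_t)(as) << 16) | (uint32_t)(val))

struct clist_entry {
	bool permit;  /* permit or deny */
	uint32_t com; /* one community per entry, a simplification */
};

static bool route_has(const uint32_t *carried, size_t n, uint32_t com)
{
	for (size_t i = 0; i < n; i++)
		if (carried[i] == com)
			return true;
	return false;
}

static bool clist_match(const struct clist_entry *e, size_t n,
			const uint32_t *carried, size_t nc)
{
	for (size_t i = 0; i < n; i++)
		if (route_has(carried, nc, e[i].com))
			return e[i].permit; /* first match wins */
	return false; /* implicit deny at end of list */
}

int main(void)
{
	/* seq 5 permit internet (0:0), seq 10 deny 4:1, seq 20 permit 3:1 */
	struct clist_entry out_as_permit[] = {
		{ true, COM(0, 0) },
		{ false, COM(4, 1) },
		{ true, COM(3, 1) },
	};
	uint32_t tagged_4_1[] = { COM(4, 1) };
	uint32_t tagged_3_1[] = { COM(3, 1) };

	/* 0:0 is no longer an implicit wildcard, so 4:1 falls through
	 * seq 5 and is caught by the deny at seq 10. */
	printf("4:1 -> %s\n",
	       clist_match(out_as_permit, 3, tagged_4_1, 1) ? "permit" : "deny");
	printf("3:1 -> %s\n",
	       clist_match(out_as_permit, 3, tagged_3_1, 1) ? "permit" : "deny");
	return 0;
}

Before the change, the modeled seq 5 entry would have returned permit for both routes; that is why the OUT_AS_PERMIT list in the r1/bgpd.conf test config keeps "permit internet" at seq 5 while still expecting 172.16.255.254/32 (tagged 4:1) to be filtered.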
diff --git a/bgpd/bgp_clist.c b/bgpd/bgp_clist.c index 34d4be8c93..bc6d4e144e 100644 --- a/bgpd/bgp_clist.c +++ b/bgpd/bgp_clist.c @@ -674,9 +674,6 @@ bool community_list_match(struct community *com, struct community_list *list) return entry->direct == COMMUNITY_PERMIT; if (entry->style == COMMUNITY_LIST_STANDARD) { - if (community_include(entry->u.com, COMMUNITY_INTERNET)) - return entry->direct == COMMUNITY_PERMIT; - if (community_match(com, entry->u.com)) return entry->direct == COMMUNITY_PERMIT; } else if (entry->style == COMMUNITY_LIST_EXPANDED) { diff --git a/debian/control b/debian/control index e8bf1a8ffa..06c16cc945 100644 --- a/debian/control +++ b/debian/control @@ -30,6 +30,7 @@ Build-Depends: bison, python3-pytest <!nocheck>, python3-sphinx, texinfo (>= 4.7), + lua5.3 <pkg.frr.lua>, liblua5.3-dev <pkg.frr.lua> Standards-Version: 4.5.0.3 Homepage: https://www.frrouting.org/ diff --git a/nhrpd/nhrp_interface.c b/nhrpd/nhrp_interface.c index 1092ce13a1..4ac30a7d75 100644 --- a/nhrpd/nhrp_interface.c +++ b/nhrpd/nhrp_interface.c @@ -165,8 +165,7 @@ static void nhrp_interface_interface_notifier(struct notifier_block *n, switch (cmd) { case NOTIFY_INTERFACE_CHANGED: - nhrp_interface_update_mtu(nifp->ifp, AFI_IP); - nhrp_interface_update_source(nifp->ifp); + nhrp_interface_update_nbma(nifp->ifp, NULL); break; case NOTIFY_INTERFACE_ADDRESS_CHANGED: nifp->nbma = nbmanifp->afi[AFI_IP].addr; diff --git a/ospf6d/ospf6_abr.c b/ospf6d/ospf6_abr.c index 5af1139d9b..e9c42bb80c 100644 --- a/ospf6d/ospf6_abr.c +++ b/ospf6d/ospf6_abr.c @@ -488,7 +488,12 @@ int ospf6_abr_originate_summary_to_area(struct ospf6_route *route, zlog_debug( "Suppressed by range %pFX of area %s", &range->prefix, route_area->name); - ospf6_abr_delete_route(summary, summary_table, old); + /* The existing summary route could be a range, don't + * remove it in this case + */ + if (summary && summary->type != OSPF6_DEST_TYPE_RANGE) + ospf6_abr_delete_route(summary, summary_table, + old); return 0; } } diff --git a/pimd/pim6_mld.c b/pimd/pim6_mld.c index c5c98d8024..60602ee04a 100644 --- a/pimd/pim6_mld.c +++ b/pimd/pim6_mld.c @@ -2246,8 +2246,16 @@ void gm_ifp_update(struct interface *ifp) return; } - if (!pim_ifp->mld) + /* + * If ipv6 mld is not enabled on interface, do not start mld activites. 
+ */ + if (!pim_ifp->gm_enable) + return; + + if (!pim_ifp->mld) { + changed = true; gm_start(ifp); + } gm_ifp = pim_ifp->mld; if (IPV6_ADDR_CMP(&pim_ifp->ll_lowest, &gm_ifp->cur_ll_lowest)) diff --git a/pimd/pim_addr.h b/pimd/pim_addr.h index 2f2ff24675..defe4070cf 100644 --- a/pimd/pim_addr.h +++ b/pimd/pim_addr.h @@ -38,6 +38,7 @@ typedef struct in_addr pim_addr; #define PIM_AF_DBG "pim" #define PIM_MROUTE_DBG "mroute" #define PIMREG "pimreg" +#define GM "IGMP" #define PIM_ADDR_FUNCNAME(name) ipv4_##name @@ -64,6 +65,7 @@ typedef struct in6_addr pim_addr; #define PIM_AF_DBG "pimv6" #define PIM_MROUTE_DBG "mroute6" #define PIMREG "pim6reg" +#define GM "MLD" #define PIM_ADDR_FUNCNAME(name) ipv6_##name diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c index 1d3f5f430a..3092d1d3f5 100644 --- a/pimd/pim_cmd_common.c +++ b/pimd/pim_cmd_common.c @@ -888,6 +888,8 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json) { struct pim_upstream *up; time_t now = pim_time_monotonic_sec(); + struct ttable *tt = NULL; + char *table = NULL; json_object *json_group = NULL; json_object *json_row = NULL; @@ -895,8 +897,15 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json) if (!json) { vty_out(vty, "\n"); - vty_out(vty, - "Source Group RpfIface RpfAddress RibNextHop Metric Pref\n"); + + /* Prepare table. */ + tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]); + ttable_add_row( + tt, + "Source|Group|RpfIface|RpfAddress|RibNextHop|Metric|Pref"); + tt->style.cell.rpad = 2; + tt->style.corner = '+'; + ttable_restyle(tt); } frr_each (rb_pim_upstream, &pim->upstream_head, up) { @@ -944,8 +953,8 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json) json_object_object_add(json_group, src_str, json_row); } else { - vty_out(vty, - "%-15pPAs %-15pPAs %-16s %-15pPA %-15pPAs %6d %4d\n", + ttable_add_row( + tt, "%pPAs|%pPAs|%s|%pPA|%pPAs|%d|%d", &up->sg.src, &up->sg.grp, rpf_ifname, &rpf->rpf_addr, &rpf->source_nexthop.mrib_nexthop_addr, @@ -953,14 +962,27 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json) rpf->source_nexthop.mrib_metric_preference); } } + /* Dump the generated table. */ + if (!json) { + table = ttable_dump(tt, "\n"); + vty_out(vty, "%s\n", table); + XFREE(MTYPE_TMP, table); + ttable_del(tt); + } } void pim_show_neighbors_secondary(struct pim_instance *pim, struct vty *vty) { struct interface *ifp; + struct ttable *tt = NULL; + char *table = NULL; - vty_out(vty, - "Interface Address Neighbor Secondary \n"); + /* Prepare table. */ + tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]); + ttable_add_row(tt, "Interface|Address|Neighbor|Secondary"); + tt->style.cell.rpad = 2; + tt->style.corner = '+'; + ttable_restyle(tt); FOR_ALL_INTERFACES (pim->vrf, ifp) { struct pim_interface *pim_ifp; @@ -988,12 +1010,16 @@ void pim_show_neighbors_secondary(struct pim_instance *pim, struct vty *vty) for (ALL_LIST_ELEMENTS_RO(neigh->prefix_list, prefix_node, p)) - vty_out(vty, - "%-16s %-15pPAs %-15pPAs %-15pFX\n", - ifp->name, &ifaddr, &neigh->source_addr, - p); + ttable_add_row(tt, "%s|%pPAs|%pPAs|%pFX", + ifp->name, &ifaddr, + &neigh->source_addr, p); } } + /* Dump the generated table. 
*/ + table = ttable_dump(tt, "\n"); + vty_out(vty, "%s\n", table); + XFREE(MTYPE_TMP, table); + ttable_del(tt); } void pim_show_state(struct pim_instance *pim, struct vty *vty, @@ -1317,15 +1343,24 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg, json_object *json) { struct pim_upstream *up; + struct ttable *tt = NULL; + char *table = NULL; time_t now; json_object *json_group = NULL; json_object *json_row = NULL; now = pim_time_monotonic_sec(); - if (!json) - vty_out(vty, - "Iif Source Group State Uptime JoinTimer RSTimer KATimer RefCnt\n"); + if (!json) { + /* Prepare table. */ + tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]); + ttable_add_row( + tt, + "Iif|Source|Group|State|Uptime|JoinTimer|RSTimer|KATimer|RefCnt"); + tt->style.cell.rpad = 2; + tt->style.corner = '+'; + ttable_restyle(tt); + } frr_each (rb_pim_upstream, &pim->upstream_head, up) { char uptime[10]; @@ -1446,8 +1481,8 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty, json_object_int_add(json_row, "sptBit", up->sptbit); json_object_object_add(json_group, src_str, json_row); } else { - vty_out(vty, - "%-16s%-15pPAs %-15pPAs %-11s %-8s %-9s %-9s %-9s %6d\n", + ttable_add_row(tt, + "%s|%pPAs|%pPAs|%s|%s|%s|%s|%s|%d", up->rpf.source_nexthop.interface ? up->rpf.source_nexthop.interface->name : "Unknown", @@ -1455,12 +1490,20 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty, join_timer, rs_timer, ka_timer, up->ref_count); } } + /* Dump the generated table. */ + if (!json) { + table = ttable_dump(tt, "\n"); + vty_out(vty, "%s\n", table); + XFREE(MTYPE_TMP, table); + ttable_del(tt); + } } static void pim_show_join_desired_helper(struct pim_instance *pim, struct vty *vty, struct pim_upstream *up, - json_object *json, bool uj) + json_object *json, bool uj, + struct ttable *tt) { json_object *json_group = NULL; json_object *json_row = NULL; @@ -1491,45 +1534,68 @@ static void pim_show_join_desired_helper(struct pim_instance *pim, json_object_object_add(json_group, src_str, json_row); } else { - vty_out(vty, "%-15pPAs %-15pPAs %-6s\n", &up->sg.src, - &up->sg.grp, - pim_upstream_evaluate_join_desired(pim, up) ? "yes" - : "no"); + ttable_add_row(tt, "%pPAs|%pPAs|%s", &up->sg.src, &up->sg.grp, + pim_upstream_evaluate_join_desired(pim, up) + ? "yes" + : "no"); } } void pim_show_join_desired(struct pim_instance *pim, struct vty *vty, bool uj) { struct pim_upstream *up; + struct ttable *tt = NULL; + char *table = NULL; json_object *json = NULL; if (uj) json = json_object_new_object(); - else - vty_out(vty, "Source Group EvalJD\n"); + else { + /* Prepare table. */ + tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]); + ttable_add_row(tt, "Source|Group|EvalJD"); + tt->style.cell.rpad = 2; + tt->style.corner = '+'; + ttable_restyle(tt); + } frr_each (rb_pim_upstream, &pim->upstream_head, up) { /* scan all interfaces */ - pim_show_join_desired_helper(pim, vty, up, json, uj); + pim_show_join_desired_helper(pim, vty, up, json, uj, tt); } if (uj) vty_json(vty, json); + else { + /* Dump the generated table. 
*/ + table = ttable_dump(tt, "\n"); + vty_out(vty, "%s\n", table); + XFREE(MTYPE_TMP, table); + ttable_del(tt); + } } void pim_show_upstream_rpf(struct pim_instance *pim, struct vty *vty, bool uj) { struct pim_upstream *up; + struct ttable *tt = NULL; + char *table = NULL; json_object *json = NULL; json_object *json_group = NULL; json_object *json_row = NULL; if (uj) json = json_object_new_object(); - else - vty_out(vty, - "Source Group RpfIface RibNextHop RpfAddress \n"); + else { + /* Prepare table. */ + tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]); + ttable_add_row(tt, + "Source|Group|RpfIface|RibNextHop|RpfAddress"); + tt->style.cell.rpad = 2; + tt->style.corner = '+'; + ttable_restyle(tt); + } frr_each (rb_pim_upstream, &pim->upstream_head, up) { struct pim_rpf *rpf; @@ -1571,16 +1637,22 @@ void pim_show_upstream_rpf(struct pim_instance *pim, struct vty *vty, bool uj) &rpf->rpf_addr); json_object_object_add(json_group, src_str, json_row); } else { - vty_out(vty, - "%-15pPAs %-15pPAs %-16s %-15pPA %-15pPA\n", - &up->sg.src, &up->sg.grp, rpf_ifname, - &rpf->source_nexthop.mrib_nexthop_addr, - &rpf->rpf_addr); + ttable_add_row(tt, "%pPAs|%pPAs|%s|%pPA|%pPA", + &up->sg.src, &up->sg.grp, rpf_ifname, + &rpf->source_nexthop.mrib_nexthop_addr, + &rpf->rpf_addr); } } if (uj) vty_json(vty, json); + else { + /* Dump the generated table. */ + table = ttable_dump(tt, "\n"); + vty_out(vty, "%s\n", table); + XFREE(MTYPE_TMP, table); + ttable_del(tt); + } } static void pim_show_join_helper(struct vty *vty, struct pim_interface *pim_ifp, @@ -3817,7 +3889,6 @@ void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg, static void show_mroute_count_per_channel_oil(struct channel_oil *c_oil, json_object *json, - struct vty *vty, struct ttable *tt) { json_object *json_group = NULL; @@ -3885,10 +3956,10 @@ void show_mroute_count(struct pim_instance *pim, struct vty *vty, /* Print PIM and IGMP route counts */ frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) - show_mroute_count_per_channel_oil(c_oil, json, vty, tt); + show_mroute_count_per_channel_oil(c_oil, json, tt); for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, sr)) - show_mroute_count_per_channel_oil(&sr->c_oil, json, vty, tt); + show_mroute_count_per_channel_oil(&sr->c_oil, json, tt); /* Dump the generated table. */ if (!json) { diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c index 0fb5e8c6d9..e03e5a2630 100644 --- a/pimd/pim_iface.c +++ b/pimd/pim_iface.c @@ -115,7 +115,7 @@ static int pim_sec_addr_comp(const void *p1, const void *p2) return 0; } -struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim, +struct pim_interface *pim_if_new(struct interface *ifp, bool gm, bool pim, bool ispimreg, bool is_vxlan_term) { struct pim_interface *pim_ifp; @@ -154,9 +154,7 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim, pim_ifp->pim_enable = pim; pim_ifp->pim_passive_enable = false; -#if PIM_IPV == 4 - pim_ifp->gm_enable = igmp; -#endif + pim_ifp->gm_enable = gm; pim_ifp->gm_join_list = NULL; pim_ifp->pim_neighbor_list = NULL; @@ -810,7 +808,7 @@ void pim_if_addr_add_all(struct interface *ifp) ifp->name); } /* - * PIM or IGMP is enabled on interface, and there is at least one + * PIM or IGMP/MLD is enabled on interface, and there is at least one * address assigned, then try to create a vif_index. 
*/ if (pim_ifp->mroute_vif_index < 0) { diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c index 408e86b698..72b16a5f49 100644 --- a/pimd/pim_nb_config.c +++ b/pimd/pim_nb_config.c @@ -348,8 +348,7 @@ static bool is_pim_interface(const struct lyd_node *dnode) return false; } -#if PIM_IPV == 4 -static int pim_cmd_igmp_start(struct interface *ifp) +static int pim_cmd_gm_start(struct interface *ifp) { struct pim_interface *pim_ifp; uint8_t need_startup = 0; @@ -377,7 +376,6 @@ static int pim_cmd_igmp_start(struct interface *ifp) return NB_OK; } -#endif /* PIM_IPV == 4 */ /* * CLI reconfiguration affects the interface level (struct pim_interface). @@ -2584,7 +2582,6 @@ int lib_interface_gmp_address_family_destroy(struct nb_cb_destroy_args *args) int lib_interface_gmp_address_family_enable_modify( struct nb_cb_modify_args *args) { -#if PIM_IPV == 4 struct interface *ifp; bool gm_enable; struct pim_interface *pim_ifp; @@ -2600,9 +2597,10 @@ int lib_interface_gmp_address_family_enable_modify( /* Limiting mcast interfaces to number of VIFs */ if (mcast_if_count == MAXVIFS) { ifp_name = yang_dnode_get_string(if_dnode, "name"); - snprintf(args->errmsg, args->errmsg_len, - "Max multicast interfaces(%d) Reached. Could not enable IGMP on interface %s", - MAXVIFS, ifp_name); + snprintf( + args->errmsg, args->errmsg_len, + "Max multicast interfaces(%d) Reached. Could not enable %s on interface %s", + MAXVIFS, GM, ifp_name); return NB_ERR_VALIDATION; } break; @@ -2614,7 +2612,7 @@ int lib_interface_gmp_address_family_enable_modify( gm_enable = yang_dnode_get_bool(args->dnode, NULL); if (gm_enable) - return pim_cmd_igmp_start(ifp); + return pim_cmd_gm_start(ifp); else { pim_ifp = ifp->info; @@ -2626,15 +2624,16 @@ int lib_interface_gmp_address_family_enable_modify( pim_if_membership_clear(ifp); +#if PIM_IPV == 4 pim_if_addr_del_all_igmp(ifp); +#else + gm_ifp_teardown(ifp); +#endif if (!pim_ifp->pim_enable) pim_if_delete(ifp); } } -#else - /* TBD Depends on MLD data structure changes */ -#endif /* PIM_IPV == 4 */ return NB_OK; } diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c index 783c9b97e7..1dce6b3562 100644 --- a/pimd/pim_rp.c +++ b/pimd/pim_rp.c @@ -51,6 +51,7 @@ #include "pim_bsm.h" #include "pim_util.h" #include "pim_ssm.h" +#include "termtable.h" /* Cleanup pim->rpf_hash each node data */ void pim_rp_list_hash_clean(void *data) @@ -1166,14 +1167,25 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range, struct rp_info *rp_info; struct rp_info *prev_rp_info = NULL; struct listnode *node; + struct ttable *tt = NULL; + char *table = NULL; char source[7]; + char grp[INET6_ADDRSTRLEN]; json_object *json_rp_rows = NULL; json_object *json_row = NULL; - if (!json) - vty_out(vty, - "RP address group/prefix-list OIF I am RP Source Group-Type\n"); + if (!json) { + /* Prepare table. 
*/ + tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]); + ttable_add_row( + tt, + "RP address|group/prefix-list|OIF|I am RP|Source|Group-Type"); + tt->style.cell.rpad = 2; + tt->style.corner = '+'; + ttable_restyle(tt); + } + for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) { if (pim_rpf_addr_is_inaddr_any(&rp_info->rp)) continue; @@ -1243,32 +1255,31 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range, json_object_array_add(json_rp_rows, json_row); } else { - vty_out(vty, "%-15pPA ", &rp_info->rp.rpf_addr); - - if (rp_info->plist) - vty_out(vty, "%-18s ", rp_info->plist); - else - vty_out(vty, "%-18pFX ", &rp_info->group); - - if (rp_info->rp.source_nexthop.interface) - vty_out(vty, "%-16s ", - rp_info->rp.source_nexthop - .interface->name); - else - vty_out(vty, "%-16s ", "(Unknown)"); - - if (rp_info->i_am_rp) - vty_out(vty, "yes"); - else - vty_out(vty, "no"); - - vty_out(vty, "%14s", source); - vty_out(vty, "%6s\n", group_type); + prefix2str(&rp_info->group, grp, sizeof(grp)); + ttable_add_row(tt, "%pPA|%s|%s|%s|%s|%s", + &rp_info->rp.rpf_addr, + rp_info->plist + ? rp_info->plist + : grp, + rp_info->rp.source_nexthop.interface + ? rp_info->rp.source_nexthop + .interface->name + : "Unknown", + rp_info->i_am_rp + ? "yes" + : "no", + source, group_type); } prev_rp_info = rp_info; } - if (json) { + /* Dump the generated table. */ + if (!json) { + table = ttable_dump(tt, "\n"); + vty_out(vty, "%s\n", table); + XFREE(MTYPE_TMP, table); + ttable_del(tt); + } else { if (prev_rp_info && json_rp_rows) json_object_object_addf(json, json_rp_rows, "%pPA", &prev_rp_info->rp.rpf_addr); diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c index 3d5d68b1f4..cfbd436981 100644 --- a/pimd/pim_vty.c +++ b/pimd/pim_vty.c @@ -370,6 +370,12 @@ static int gm_config_write(struct vty *vty, int writes, static int gm_config_write(struct vty *vty, int writes, struct pim_interface *pim_ifp) { + /* IF ipv6 mld */ + if (pim_ifp->gm_enable) { + vty_out(vty, " ipv6 mld\n"); + ++writes; + } + if (pim_ifp->mld_version != MLD_DEFAULT_VERSION) vty_out(vty, " ipv6 mld version %d\n", pim_ifp->mld_version); if (pim_ifp->gm_default_query_interval != IGMP_GENERAL_QUERY_INTERVAL) diff --git a/tests/topotests/bgp_clist/__init__.py b/tests/topotests/bgp_clist/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_clist/__init__.py diff --git a/tests/topotests/bgp_clist/r1/bgpd.conf b/tests/topotests/bgp_clist/r1/bgpd.conf new file mode 100644 index 0000000000..68c5222e92 --- /dev/null +++ b/tests/topotests/bgp_clist/r1/bgpd.conf @@ -0,0 +1,28 @@ +router bgp 65001 + no bgp ebgp-requires-policy + neighbor 192.168.255.2 remote-as 65002 + neighbor 192.168.255.2 timers 3 10 + address-family ipv4 + redistribute connected route-map connected + neighbor 192.168.255.2 route-map r2 out + exit-address-family +! +ip prefix-list p1 seq 5 permit 172.16.255.253/32 +ip prefix-list p2 seq 5 permit 172.16.255.254/32 +! +bgp community-list standard OUT_AS_PERMIT seq 5 permit internet +bgp community-list standard OUT_AS_PERMIT seq 10 deny 4:1 +bgp community-list standard OUT_AS_PERMIT seq 20 permit 3:1 +! +route-map r2 permit 10 + match community OUT_AS_PERMIT + set community 123:123 additive +exit +! 
+route-map connected permit 10 + match ip address prefix-list p1 + set community 3:1 +route-map connected permit 20 + match ip address prefix-list p2 + set community 4:1 +exit diff --git a/tests/topotests/bgp_clist/r1/zebra.conf b/tests/topotests/bgp_clist/r1/zebra.conf new file mode 100644 index 0000000000..ae668d79ed --- /dev/null +++ b/tests/topotests/bgp_clist/r1/zebra.conf @@ -0,0 +1,10 @@ +! +interface lo + ip address 172.16.255.253/32 + ip address 172.16.255.254/32 +! +interface r1-eth0 + ip address 192.168.255.1/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_clist/r2/bgpd.conf b/tests/topotests/bgp_clist/r2/bgpd.conf new file mode 100644 index 0000000000..cdc0d217be --- /dev/null +++ b/tests/topotests/bgp_clist/r2/bgpd.conf @@ -0,0 +1,5 @@ +router bgp 65002 + no bgp ebgp-requires-policy + neighbor 192.168.255.1 remote-as 65001 + neighbor 192.168.255.1 timers 3 10 +! diff --git a/tests/topotests/bgp_clist/r2/zebra.conf b/tests/topotests/bgp_clist/r2/zebra.conf new file mode 100644 index 0000000000..606c17bec9 --- /dev/null +++ b/tests/topotests/bgp_clist/r2/zebra.conf @@ -0,0 +1,6 @@ +! +interface r2-eth0 + ip address 192.168.255.2/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_clist/test_bgp_clist.py b/tests/topotests/bgp_clist/test_bgp_clist.py new file mode 100644 index 0000000000..93825f441b --- /dev/null +++ b/tests/topotests/bgp_clist/test_bgp_clist.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2022 by +# Donatas Abraitis <donatas@opensourcerouting.org> +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Test if basic BGP community-list filtering works correctly. 
+""" + +import os +import sys +import json +import pytest +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen + +pytestmark = [pytest.mark.bgpd] + + +def build_topo(tgen): + for routern in range(1, 3): + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + +def setup_module(mod): + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for i, (rname, router) in enumerate(router_list.items(), 1): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_clist(): + tgen = get_topogen() + + router = tgen.gears["r2"] + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + def _bgp_converge(): + output = json.loads(router.vtysh_cmd("show bgp summary json")) + expected = { + "ipv4Unicast": { + "peers": {"192.168.255.1": {"state": "Established", "pfxRcd": 1}} + } + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_converge) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "BGP can't converge" + + def _bgp_clist_match(): + output = json.loads(router.vtysh_cmd("show bgp ipv4 unicast json detail")) + expected = { + "routes": { + "172.16.255.253/32": [ + {"valid": True, "community": {"string": "3:1 123:123"}} + ], + "172.16.255.254/32": None, + } + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_clist_match) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "BGP community-list filtering doesn't work" + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py b/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py index c8cdc7ec5c..4d7f436eac 100644 --- a/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py +++ b/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py @@ -22,6 +22,7 @@ Following tests are covered. 1. Verify default-originate route with default static and network command 2. Verify default-originate route with aggregate summary command +3. 
Verfiy default-originate behaviour in ecmp """ import os import sys @@ -48,7 +49,10 @@ from lib.bgp import ( from lib.common_config import ( verify_fib_routes, step, + create_prefix_lists, run_frr_cmd, + create_route_maps, + shutdown_bringup_interface, get_frr_ipv6_linklocal, start_topology, apply_raw_config, @@ -296,6 +300,78 @@ def verify_the_uptime(time_stamp_before, time_stamp_after, incremented=None): return True +def get_best_path_route_in_FIB(tgen, topo, dut, network): + """ + API to verify the best route in FIB and return the ipv4 and ipv6 nexthop for the given route + command + ======= + show ip route + show ipv6 route + params + ====== + dut : device under test : + network ; route (ip) to which the best route to be retrieved + Returns + ======== + on success : return dict with next hops for the best hop + on failure : return error message with boolean False + """ + is_ipv4_best_path_found = False + is_ipv6_best_path_found = False + rnode = tgen.routers()[dut] + ipv4_show_bgp_json = run_frr_cmd(rnode, "sh ip bgp json ", isjson=True) + ipv6_show_bgp_json = run_frr_cmd( + rnode, "sh ip bgp ipv6 unicast json ", isjson=True + ) + output_dict = {"ipv4": None, "ipv6": None} + ipv4_nxt_hop_count = len(ipv4_show_bgp_json["routes"][network["ipv4"]]) + for index in range(ipv4_nxt_hop_count): + if "bestpath" in ipv4_show_bgp_json["routes"][network["ipv4"]][index].keys(): + best_path_ip = ipv4_show_bgp_json["routes"][network["ipv4"]][index][ + "nexthops" + ][0]["ip"] + output_dict["ipv4"] = best_path_ip + logger.info( + "[DUT [{}]] Best path for the route {} is {} ".format( + dut, network["ipv4"], best_path_ip + ) + ) + is_ipv4_best_path_found = True + else: + logger.error("ERROR....! No Best Path Found in BGP RIB.... FAILED") + + ipv6_nxt_hop_count = len(ipv6_show_bgp_json["routes"][network["ipv6"]]) + for index in range(ipv6_nxt_hop_count): + if "bestpath" in ipv6_show_bgp_json["routes"][network["ipv6"]][index].keys(): + ip_add_count = len( + ipv6_show_bgp_json["routes"][network["ipv6"]][index]["nexthops"] + ) + for i_index in range(ip_add_count): + if ( + "global" + in ipv6_show_bgp_json["routes"][network["ipv6"]][index]["nexthops"][ + i_index + ]["scope"] + ): + best_path_ip = ipv6_show_bgp_json["routes"][network["ipv6"]][index][ + "nexthops" + ][i_index]["ip"] + output_dict["ipv6"] = best_path_ip + logger.info( + "[DUT [{}]] Best path for the route {} is {} ".format( + dut, network["ipv6"], best_path_ip + ) + ) + + else: + logger.error("ERROR....! No Best Path Found in BGP RIB.... FAILED") + if is_ipv4_best_path_found: + return output_dict + else: + logger.error("ERROR...! 
Unable to find the Best Path in the RIB") + return False + + ##################################################### # # Testcases @@ -1409,6 +1485,326 @@ def test_verify_bgp_default_originate_with_aggregate_summary_p1(request): write_test_footer(tc_name) +def test_verify_default_originate_with_2way_ecmp_p2(request): + """ + Summary: "Verify default-originate route with 3 way ECMP and traffic " + """ + + tgen = get_topogen() + global BGP_CONVERGENCE + global DEFAULT_ROUTES + DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"} + + if BGP_CONVERGENCE != True: + pytest.skip("skipped because of BGP Convergence failure") + # test case name + tc_name = request.node.name + write_test_header(tc_name) + if tgen.routers_have_failure(): + check_router_status(tgen) + reset_config_on_routers(tgen) + + step("Populating next-hops details") + r1_r2_ipv4_neighbor_ips = [] + r1_r2_ipv6_neighbor_ips = [] + r1_link = None + for index in range(1, 3): + r1_link = "r1-link" + str(index) + r1_r2_ipv4_neighbor_ips.append( + topo["routers"]["r2"]["links"][r1_link]["ipv4"].split("/")[0] + ) + r1_r2_ipv6_neighbor_ips.append( + topo["routers"]["r2"]["links"][r1_link]["ipv6"].split("/")[0] + ) + + step( + "Configure default-originate on R1 for all the neighbor of IPv4 and IPv6 peers " + ) + local_as = get_dut_as_number(tgen, dut="r1") + for index in range(2): + raw_config = { + "r1": { + "raw_config": [ + "router bgp {}".format(local_as), + "address-family ipv4 unicast", + "neighbor {} default-originate".format( + r1_r2_ipv4_neighbor_ips[index] + ), + "exit-address-family", + "address-family ipv6 unicast", + "neighbor {} default-originate ".format( + r1_r2_ipv6_neighbor_ips[index] + ), + "exit-address-family", + ] + } + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "After configuring default-originate command , verify default routes are advertised on R2 " + ) + + r2_link = None + for index in range(1, 3): + r2_link = "r2-link" + str(index) + ipv4_nxt_hop = topo["routers"]["r1"]["links"][r2_link]["ipv4"].split("/")[0] + interface = topo["routers"]["r1"]["links"][r2_link]["interface"] + ipv6_link_local_nxt_hop = get_frr_ipv6_linklocal(tgen, "r1", intf=interface) + DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local_nxt_hop} + + result = verify_rib_default_route( + tgen, + topo, + dut="r2", + routes=DEFAULT_ROUTES, + expected_nexthop=DEFAULT_ROUTE_NXT_HOP, + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Ping R1 configure IPv4 and IPv6 loopback address from R2") + pingaddr = topo["routers"]["r1"]["links"]["lo"]["ipv4"].split("/")[0] + router = tgen.gears["r2"] + output = router.run("ping -c 4 -w 4 {}".format(pingaddr)) + assert " 0% packet loss" in output, "Ping R1->R2 FAILED" + logger.info("Ping from R1 to R2 ... 
success") + + step("Shuting up the active route") + network = {"ipv4": "0.0.0.0/0", "ipv6": "::/0"} + ipv_dict = get_best_path_route_in_FIB(tgen, topo, dut="r2", network=network) + dut_links = topo["routers"]["r1"]["links"] + active_interface = None + for key, values in dut_links.items(): + ipv4_address = dut_links[key]["ipv4"].split("/")[0] + ipv6_address = dut_links[key]["ipv6"].split("/")[0] + if ipv_dict["ipv4"] == ipv4_address and ipv_dict["ipv6"] == ipv6_address: + active_interface = dut_links[key]["interface"] + + logger.info( + "Shutting down the interface {} on router {} ".format(active_interface, "r1") + ) + shutdown_bringup_interface(tgen, "r1", active_interface, False) + + step("Verify the complete convergence to fail after shutting the interface") + result = verify_bgp_convergence(tgen, topo, expected=False) + assert ( + result is not True + ), " Testcase {} : After shuting down the interface Convergence is expected to be Failed".format( + tc_name + ) + + step( + "Verify routes from active best path is not received from r1 after shuting the interface" + ) + r2_link = None + for index in range(1, 3): + r2_link = "r2-link" + str(index) + ipv4_nxt_hop = topo["routers"]["r1"]["links"][r2_link]["ipv4"].split("/")[0] + interface = topo["routers"]["r1"]["links"][r2_link]["interface"] + ipv6_link_local_nxt_hop = get_frr_ipv6_linklocal(tgen, "r1", intf=interface) + DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local_nxt_hop} + if index == 1: + result = verify_rib_default_route( + tgen, + topo, + dut="r2", + routes=DEFAULT_ROUTES, + expected_nexthop=DEFAULT_ROUTE_NXT_HOP, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + else: + result = verify_rib_default_route( + tgen, + topo, + dut="r2", + routes=DEFAULT_ROUTES, + expected_nexthop=DEFAULT_ROUTE_NXT_HOP, + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Ping R1 configure IPv4 and IPv6 loopback address from R2") + pingaddr = topo["routers"]["r1"]["links"]["lo"]["ipv4"].split("/")[0] + router = tgen.gears["r2"] + output = router.run("ping -c 4 -w 4 {}".format(pingaddr)) + assert " 0% packet loss" in output, "Ping R1->R2 FAILED" + logger.info("Ping from R1 to R2 ... 
success") + + step("No Shuting up the active route") + + shutdown_bringup_interface(tgen, "r1", active_interface, True) + + step("Verify the complete convergence after bringup the interface") + result = verify_bgp_convergence(tgen, topo) + assert ( + result is True + ), " Testcase {} : After bringing up the interface complete convergence is expected ".format( + tc_name + ) + + step("Verify all the routes are received from r1 after no shuting the interface") + r2_link = None + for index in range(1, 3): + r2_link = "r2-link" + str(index) + ipv4_nxt_hop = topo["routers"]["r1"]["links"][r2_link]["ipv4"].split("/")[0] + interface = topo["routers"]["r1"]["links"][r2_link]["interface"] + ipv6_link_local_nxt_hop = get_frr_ipv6_linklocal(tgen, "r1", intf=interface) + DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local_nxt_hop} + if index == 1: + result = verify_rib_default_route( + tgen, + topo, + dut="r2", + routes=DEFAULT_ROUTES, + expected_nexthop=DEFAULT_ROUTE_NXT_HOP, + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + else: + result = verify_rib_default_route( + tgen, + topo, + dut="r2", + routes=DEFAULT_ROUTES, + expected_nexthop=DEFAULT_ROUTE_NXT_HOP, + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Configure IPv4 and IPv6 route-map with deny option on R2 to filter default route 0.0.0.0/0 and 0::0/0" + ) + DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"} + input_dict_3 = { + "r2": { + "prefix_lists": { + "ipv4": { + "Pv4": [ + { + "seqid": "1", + "network": DEFAULT_ROUTES["ipv4"], + "action": "permit", + } + ] + }, + "ipv6": { + "Pv6": [ + { + "seqid": "1", + "network": DEFAULT_ROUTES["ipv6"], + "action": "permit", + } + ] + }, + } + } + } + result = create_prefix_lists(tgen, input_dict_3) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + input_dict_3 = { + "r2": { + "route_maps": { + "RMv4": [ + { + "action": "deny", + "seq_id": "1", + "match": {"ipv4": {"prefix_lists": "Pv4"}}, + }, + ], + "RMv6": [ + { + "action": "deny", + "seq_id": "1", + "match": {"ipv6": {"prefix_lists": "Pv6"}}, + }, + ], + } + } + } + result = create_route_maps(tgen, input_dict_3) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Apply route-map IN direction of R2 ( R2-R1) for IPv4 and IPv6 BGP neighbors") + r2_link = None + for index in range(1, 3): + r2_link = "r2-link" + str(index) + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + r2_link: { + "route_maps": [ + {"name": "RMv4", "direction": "in"} + ] + }, + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + r2_link: { + "route_maps": [ + {"name": "RMv6", "direction": "in"} + ] + }, + } + } + } + } + }, + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("After applying the route-map the routes are not expected in RIB ") + r2_link = None + for index in range(1, 3): + r2_link = "r2-link" + str(index) + ipv4_nxt_hop = topo["routers"]["r1"]["links"][r2_link]["ipv4"].split("/")[0] + interface = topo["routers"]["r1"]["links"][r2_link]["interface"] + ipv6_link_local_nxt_hop = get_frr_ipv6_linklocal(tgen, "r1", intf=interface) + DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local_nxt_hop} + + result = 
verify_rib_default_route( + tgen, + topo, + dut="r2", + routes=DEFAULT_ROUTES, + expected_nexthop=DEFAULT_ROUTE_NXT_HOP, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/conftest.py b/tests/topotests/conftest.py index 120a3e82e4..f79ca71a64 100755 --- a/tests/topotests/conftest.py +++ b/tests/topotests/conftest.py @@ -363,7 +363,7 @@ def pytest_configure(config): # Check environment now that we have config if not diagnose_env(rundir): - pytest.exit("environment has errors, please read the logs") + pytest.exit("environment has errors, please read the logs in %s" % rundir) @pytest.fixture(autouse=True, scope="session") diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py index c51a187f28..04712eda87 100644 --- a/tests/topotests/lib/topogen.py +++ b/tests/topotests/lib/topogen.py @@ -1293,7 +1293,7 @@ def diagnose_env_linux(rundir): ) continue - logger.warning("could not find {} in {}".format(fname, frrdir)) + logger.error("could not find {} in {}".format(fname, frrdir)) ret = False else: if fname != "zebra": diff --git a/tools/frrcommon.sh.in b/tools/frrcommon.sh.in index 759d498379..b589ced965 100755 --- a/tools/frrcommon.sh.in +++ b/tools/frrcommon.sh.in @@ -272,7 +272,7 @@ all_start() { } all_stop() { - local pids reversed + local pids reversed need_zebra daemon_list enabled_daemons disabled_daemons [ "$1" = "--reallyall" ] && enabled_daemons="$enabled_daemons $disabled_daemons" @@ -282,13 +282,23 @@ all_stop() { reversed="$dmninst $reversed" done + # Stop zebra last, after trying to stop the other daemons for dmninst in $reversed; do + if [ "$dmninst" = "zebra" ]; then + need_zebra="yes" + continue + fi + daemon_stop "$dmninst" "$1" & pids="$pids $!" 
done for pid in $pids; do wait $pid done + + if [ -n "$need_zebra" ]; then + daemon_stop "zebra" + fi } all_status() { diff --git a/zebra/kernel_socket.c b/zebra/kernel_socket.c index cb549339af..4c089ee194 100644 --- a/zebra/kernel_socket.c +++ b/zebra/kernel_socket.c @@ -1112,14 +1112,6 @@ void rtm_read(struct rt_msghdr *rtm) } else return; - /* - * CHANGE: delete the old prefix, we have no further information - * to specify the route really - */ - if (rtm->rtm_type == RTM_CHANGE) - rib_delete(afi, SAFI_UNICAST, VRF_DEFAULT, ZEBRA_ROUTE_KERNEL, - 0, zebra_flags, &p, NULL, NULL, 0, RT_TABLE_MAIN, 0, - 0, true); if (rtm->rtm_type == RTM_GET || rtm->rtm_type == RTM_ADD || rtm->rtm_type == RTM_CHANGE) rib_add(afi, SAFI_UNICAST, VRF_DEFAULT, proto, 0, zebra_flags, diff --git a/zebra/redistribute.c b/zebra/redistribute.c index 1a28f8ceec..4a8fe938ed 100644 --- a/zebra/redistribute.c +++ b/zebra/redistribute.c @@ -685,15 +685,10 @@ int zebra_add_import_table_entry(struct zebra_vrf *zvrf, struct route_node *rn, zebra_del_import_table_entry(zvrf, rn, same); } - newre = XCALLOC(MTYPE_RE, sizeof(struct route_entry)); - newre->type = ZEBRA_ROUTE_TABLE; - newre->distance = zebra_import_table_distance[afi][re->table]; - newre->flags = re->flags; - newre->metric = re->metric; - newre->mtu = re->mtu; - newre->table = zvrf->table_id; - newre->uptime = monotime(NULL); - newre->instance = re->table; + newre = zebra_rib_route_entry_new( + 0, ZEBRA_ROUTE_TABLE, re->table, re->flags, re->nhe_id, + zvrf->table_id, re->metric, re->mtu, + zebra_import_table_distance[afi][re->table], re->tag); ng = nexthop_group_new(); copy_nexthops(&ng->nexthop, re->nhe->nhg.nexthop, NULL); diff --git a/zebra/rib.h b/zebra/rib.h index a40843e27f..dec5b2b8d6 100644 --- a/zebra/rib.h +++ b/zebra/rib.h @@ -178,15 +178,17 @@ struct route_entry { /* meta-queue structure: * sub-queue 0: nexthop group objects * sub-queue 1: EVPN/VxLAN objects - * sub-queue 2: connected - * sub-queue 3: kernel - * sub-queue 4: static - * sub-queue 5: RIP, RIPng, OSPF, OSPF6, IS-IS, EIGRP, NHRP - * sub-queue 6: iBGP, eBGP - * sub-queue 7: any other origin (if any) typically those that + * sub-queue 2: Early Route Processing + * sub-queue 3: Early Label Processing + * sub-queue 4: connected + * sub-queue 5: kernel + * sub-queue 6: static + * sub-queue 7: RIP, RIPng, OSPF, OSPF6, IS-IS, EIGRP, NHRP + * sub-queue 8: iBGP, eBGP + * sub-queue 9: any other origin (if any) typically those that * don't generate routes */ -#define MQ_SIZE 8 +#define MQ_SIZE 10 struct meta_queue { struct list *subq[MQ_SIZE]; uint32_t size; /* sum of lengths of all subqueues */ @@ -342,6 +344,12 @@ extern void _route_entry_dump(const char *func, union prefixconstptr pp, union prefixconstptr src_pp, const struct route_entry *re); +struct route_entry * +zebra_rib_route_entry_new(vrf_id_t vrf_id, int type, uint8_t instance, + uint32_t flags, uint32_t nhe_id, uint32_t table_id, + uint32_t metric, uint32_t mtu, uint8_t distance, + route_tag_t tag); + #define ZEBRA_RIB_LOOKUP_ERROR -1 #define ZEBRA_RIB_FOUND_EXACT 0 #define ZEBRA_RIB_FOUND_NOGATE 1 diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c index 0eab1fa850..e883033d59 100644 --- a/zebra/rt_netlink.c +++ b/zebra/rt_netlink.c @@ -937,44 +937,38 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id, afi = AFI_IP6; if (h->nlmsg_type == RTM_NEWROUTE) { + struct route_entry *re; + struct nexthop_group *ng = NULL; + + re = zebra_rib_route_entry_new(vrf_id, proto, 0, flags, nhe_id, + table, metric, mtu, 
distance, + tag); + if (!nhe_id) + ng = nexthop_group_new(); if (!tb[RTA_MULTIPATH]) { - struct nexthop nh = {0}; + struct nexthop *nexthop, nh; if (!nhe_id) { nh = parse_nexthop_unicast( ns_id, rtm, tb, bh_type, index, prefsrc, gate, afi, vrf_id); + + nexthop = nexthop_new(); + *nexthop = nh; + nexthop_group_add_sorted(ng, nexthop); } - rib_add(afi, SAFI_UNICAST, vrf_id, proto, 0, flags, &p, - &src_p, &nh, nhe_id, table, metric, mtu, - distance, tag, startup); } else { /* This is a multipath route */ - struct route_entry *re; - struct nexthop_group *ng = NULL; struct rtnexthop *rtnh = (struct rtnexthop *)RTA_DATA(tb[RTA_MULTIPATH]); - re = XCALLOC(MTYPE_RE, sizeof(struct route_entry)); - re->type = proto; - re->distance = distance; - re->flags = flags; - re->metric = metric; - re->mtu = mtu; - re->vrf_id = vrf_id; - re->table = table; - re->uptime = monotime(NULL); - re->tag = tag; - re->nhe_id = nhe_id; - if (!nhe_id) { uint8_t nhop_num; /* Use temporary list of nexthops; parse * message payload's nexthops. */ - ng = nexthop_group_new(); nhop_num = parse_multipath_nexthops_unicast( ns_id, ng, rtm, rtnh, tb, @@ -989,23 +983,22 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id, ng = NULL; } } - - if (nhe_id || ng) - rib_add_multipath(afi, SAFI_UNICAST, &p, - &src_p, re, ng, startup); - else { - /* - * I really don't see how this is possible - * but since we are testing for it let's - * let the end user know why the route - * that was just received was swallowed - * up and forgotten - */ - zlog_err( - "%s: %pFX multipath RTM_NEWROUTE has a invalid nexthop group from the kernel", - __func__, &p); - XFREE(MTYPE_RE, re); - } + } + if (nhe_id || ng) + rib_add_multipath(afi, SAFI_UNICAST, &p, &src_p, re, ng, + startup); + else { + /* + * I really don't see how this is possible + * but since we are testing for it let's + * let the end user know why the route + * that was just received was swallowed + * up and forgotten + */ + zlog_err( + "%s: %pFX multipath RTM_NEWROUTE has a invalid nexthop group from the kernel", + __func__, &p); + XFREE(MTYPE_RE, re); } } else { if (nhe_id) { diff --git a/zebra/tc_netlink.c b/zebra/tc_netlink.c index 89ce075454..4fb0241d1d 100644 --- a/zebra/tc_netlink.c +++ b/zebra/tc_netlink.c @@ -294,7 +294,7 @@ static ssize_t netlink_tclass_msg_encode(int cmd, struct zebra_dplane_ctx *ctx, htb_opt.cbuffer = cbuffer; tc_calc_rate_table(&htb_opt.rate, rtab, mtu); - tc_calc_rate_table(&htb_opt.ceil, rtab, mtu); + tc_calc_rate_table(&htb_opt.ceil, ctab, mtu); htb_opt.ceil.mpu = htb_opt.rate.mpu = 0; htb_opt.ceil.overhead = htb_opt.rate.overhead = 0; diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c index a578395ef8..761ba789b8 100644 --- a/zebra/zapi_msg.c +++ b/zebra/zapi_msg.c @@ -2034,7 +2034,7 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) struct nhg_backup_info *bnhg = NULL; int ret; vrf_id_t vrf_id; - struct nhg_hash_entry nhe; + struct nhg_hash_entry nhe, *n = NULL; s = msg; if (zapi_route_decode(s, &api) < 0) { @@ -2052,17 +2052,10 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) (int)api.message, api.flags); /* Allocate new route. */ - re = XCALLOC(MTYPE_RE, sizeof(struct route_entry)); - re->type = api.type; - re->instance = api.instance; - re->flags = api.flags; - re->uptime = monotime(NULL); - re->vrf_id = vrf_id; - - if (api.tableid) - re->table = api.tableid; - else - re->table = zvrf->table_id; + re = zebra_rib_route_entry_new( + vrf_id, api.type, api.instance, api.flags, api.nhgid, + api.tableid ? 
api.tableid : zvrf->table_id, api.metric, api.mtu, + api.distance, api.tag); if (!CHECK_FLAG(api.message, ZAPI_MESSAGE_NHG) && (!CHECK_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP) @@ -2087,9 +2080,6 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) &api.prefix); } - if (CHECK_FLAG(api.message, ZAPI_MESSAGE_NHG)) - re->nhe_id = api.nhgid; - if (!re->nhe_id && (!zapi_read_nexthops(client, &api.prefix, api.nexthops, api.flags, api.message, api.nexthop_num, @@ -2105,15 +2095,6 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) return; } - if (CHECK_FLAG(api.message, ZAPI_MESSAGE_DISTANCE)) - re->distance = api.distance; - if (CHECK_FLAG(api.message, ZAPI_MESSAGE_METRIC)) - re->metric = api.metric; - if (CHECK_FLAG(api.message, ZAPI_MESSAGE_TAG)) - re->tag = api.tag; - if (CHECK_FLAG(api.message, ZAPI_MESSAGE_MTU)) - re->mtu = api.mtu; - if (CHECK_FLAG(api.message, ZAPI_MESSAGE_OPAQUE)) { re->opaque = XMALLOC(MTYPE_RE_OPAQUE, @@ -2161,9 +2142,10 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) zebra_nhe_init(&nhe, afi, ng->nexthop); nhe.nhg.nexthop = ng->nexthop; nhe.backup_info = bnhg; + n = zebra_nhe_copy(&nhe, 0); } - ret = rib_add_multipath_nhe(afi, api.safi, &api.prefix, src_p, - re, &nhe, false); + ret = rib_add_multipath_nhe(afi, api.safi, &api.prefix, src_p, re, n, + false); /* * rib_add_multipath_nhe only fails in a couple spots diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c index 763c92ebb6..6a691a222f 100644 --- a/zebra/zebra_dplane.c +++ b/zebra/zebra_dplane.c @@ -2775,14 +2775,13 @@ int dplane_ctx_tc_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op) { int ret = EINVAL; - struct zebra_vrf *zvrf = NULL; struct zebra_ns *zns = NULL; ctx->zd_op = op; ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS; /* TODO: init traffic control qdisc */ - zns = zvrf ? zvrf->zns : zebra_ns_lookup(NS_DEFAULT); + zns = zebra_ns_lookup(NS_DEFAULT); dplane_ctx_ns_init(ctx, zns, true); @@ -3513,7 +3512,7 @@ dplane_route_update_internal(struct route_node *rn, static enum zebra_dplane_result dplane_tc_update_internal(enum dplane_op_e op) { enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE; - int ret = EINVAL; + int ret; struct zebra_dplane_ctx *ctx = NULL; /* Obtain context block */ diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c index 3010a516b9..9756d9ba08 100644 --- a/zebra/zebra_mpls.c +++ b/zebra/zebra_mpls.c @@ -2747,9 +2747,9 @@ static bool ftn_update_nexthop(bool add_p, struct nexthop *nexthop, return true; } -void mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, - struct prefix *prefix, uint8_t route_type, - unsigned short route_instance) +void zebra_mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, + struct prefix *prefix, uint8_t route_type, + uint8_t route_instance) { struct route_table *table; struct route_node *rn; @@ -2882,8 +2882,8 @@ static bool ftn_update_znh(bool add_p, enum lsp_types_t type, * There are several changes that need to be made, in several zebra * data structures, so we want to do all the work required at once. 
*/ -void mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf, - const struct zapi_labels *zl) +void zebra_mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf, + const struct zapi_labels *zl) { int i, counter, ret = 0; char buf[NEXTHOP_STRLEN]; diff --git a/zebra/zebra_mpls.h b/zebra/zebra_mpls.h index a114f01339..cf247861f8 100644 --- a/zebra/zebra_mpls.h +++ b/zebra/zebra_mpls.h @@ -260,17 +260,30 @@ void zebra_mpls_print_fec(struct vty *vty, struct zebra_vrf *zvrf, /* * Handle zapi request to install/uninstall LSP and * (optionally) FEC-To-NHLFE (FTN) bindings. + * + * mpls_zapi_labels_process -> Installs for future processing + * in the meta-q + * zebra_mpls_labels_process -> called by the meta-q */ void mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf, const struct zapi_labels *zl); +void zebra_mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf, + const struct zapi_labels *zl); /* * Uninstall all NHLFEs bound to a single FEC. + * + * mpls_ftn_uninstall -> Called to enqueue into early label processing + * via the metaq + * zebra_mpls_ftn_uninstall -> Called when we process the meta q + * for this item */ void mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, struct prefix *prefix, uint8_t route_type, - unsigned short route_instance); - + uint8_t route_instance); +void zebra_mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type, + struct prefix *prefix, uint8_t route_type, + uint8_t route_instance); /* * Install/update a NHLFE for an LSP in the forwarding table. This may be * a new LSP entry or a new NHLFE for an existing in-label or an update of diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c index 03bda8cc33..bd7e8bbbd0 100644 --- a/zebra/zebra_rib.c +++ b/zebra/zebra_rib.c @@ -81,6 +81,8 @@ DEFINE_HOOK(rib_update, (struct route_node * rn, const char *reason), enum meta_queue_indexes { META_QUEUE_NHG, META_QUEUE_EVPN, + META_QUEUE_EARLY_ROUTE, + META_QUEUE_EARLY_LABEL, META_QUEUE_CONNECTED, META_QUEUE_KERNEL, META_QUEUE_STATIC, @@ -173,6 +175,29 @@ struct wq_evpn_wrapper { #define WQ_EVPN_WRAPPER_TYPE_REM_MACIP 0x03 #define WQ_EVPN_WRAPPER_TYPE_REM_VTEP 0x04 +enum wq_label_types { + WQ_LABEL_FTN_UNINSTALL, + WQ_LABEL_LABELS_PROCESS, +}; + +struct wq_label_wrapper { + enum wq_label_types type; + vrf_id_t vrf_id; + + struct prefix p; + enum lsp_types_t ltype; + uint8_t route_type; + uint8_t route_instance; + + bool add_p; + struct zapi_labels zl; + + int afi; +}; + +static void rib_addnode(struct route_node *rn, struct route_entry *re, + int process); + /* %pRN is already a printer for route_nodes that just prints the prefix */ #ifdef _FRR_ATTRIBUTE_PRINTFRR #pragma FRR printfrr_ext "%pZN" (struct route_node *) @@ -185,6 +210,10 @@ static const char *subqueue2str(enum meta_queue_indexes index) return "NHG Objects"; case META_QUEUE_EVPN: return "EVPN/VxLan Objects"; + case META_QUEUE_EARLY_ROUTE: + return "Early Route Processing"; + case META_QUEUE_EARLY_LABEL: + return "Early Label Handling"; case META_QUEUE_CONNECTED: return "Connected Routes"; case META_QUEUE_KERNEL: @@ -2468,6 +2497,33 @@ static void process_subq_nhg(struct listnode *lnode) XFREE(MTYPE_WQ_WRAPPER, w); } +static void process_subq_early_label(struct listnode *lnode) +{ + struct wq_label_wrapper *w = listgetdata(lnode); + struct zebra_vrf *zvrf; + + if (!w) + return; + + zvrf = vrf_info_lookup(w->vrf_id); + if (!zvrf) { + XFREE(MTYPE_WQ_WRAPPER, w); + return; + } + + switch (w->type) { + case WQ_LABEL_FTN_UNINSTALL: + zebra_mpls_ftn_uninstall(zvrf, 
w->ltype, &w->p, w->route_type,
+					     w->route_instance);
+		break;
+	case WQ_LABEL_LABELS_PROCESS:
+		zebra_mpls_zapi_labels_process(w->add_p, zvrf, &w->zl);
+		break;
+	}
+
+	XFREE(MTYPE_WQ_WRAPPER, w);
+}
+
 static void process_subq_route(struct listnode *lnode, uint8_t qindex)
 {
 	struct route_node *rnode = NULL;
@@ -2506,6 +2562,460 @@ static void process_subq_route(struct listnode *lnode, uint8_t qindex)
 	route_unlock_node(rnode);
 }
 
+static void rib_re_nhg_free(struct route_entry *re)
+{
+	if (re->nhe && re->nhe_id) {
+		assert(re->nhe->id == re->nhe_id);
+		route_entry_update_nhe(re, NULL);
+	} else if (re->nhe && re->nhe->nhg.nexthop)
+		nexthops_free(re->nhe->nhg.nexthop);
+
+	nexthops_free(re->fib_ng.nexthop);
+}
+
+struct zebra_early_route {
+	afi_t afi;
+	safi_t safi;
+	struct prefix p;
+	struct prefix_ipv6 src_p;
+	bool src_p_provided;
+	struct route_entry *re;
+	struct nhg_hash_entry *re_nhe;
+	bool startup;
+	bool deletion;
+	bool fromkernel;
+};
+
+static void early_route_memory_free(struct zebra_early_route *ere)
+{
+	if (ere->re_nhe)
+		zebra_nhg_free(ere->re_nhe);
+
+	XFREE(MTYPE_RE, ere->re);
+	XFREE(MTYPE_WQ_WRAPPER, ere);
+}
+
+static void process_subq_early_route_add(struct zebra_early_route *ere)
+{
+	struct route_entry *re = ere->re;
+	struct route_table *table;
+	struct nhg_hash_entry *nhe = NULL;
+	struct route_node *rn;
+	struct route_entry *same = NULL, *first_same = NULL;
+	int same_count = 0;
+	rib_dest_t *dest;
+
+	/* Lookup table. */
+	table = zebra_vrf_get_table_with_table_id(ere->afi, ere->safi,
+						  re->vrf_id, re->table);
+	if (!table) {
+		early_route_memory_free(ere);
+		return;
+	}
+
+	if (re->nhe_id > 0) {
+		nhe = zebra_nhg_lookup_id(re->nhe_id);
+
+		if (!nhe) {
+			/*
+			 * We've received from the kernel a nexthop id
+			 * that we don't have saved yet. More than likely
+			 * it has not been processed and is on the
+			 * queue to be processed. Let's stop what we
+			 * are doing and cause the meta q to be processed,
+			 * storing this for later.
+			 *
+			 * This is being done this way because zebra
+			 * runs with the assumption that a nexthop group
+			 * is processed before any route that references it.
+			 */
+			flog_err(
+				EC_ZEBRA_TABLE_LOOKUP_FAILED,
+				"Zebra failed to find the nexthop hash entry for id=%u in a route entry %pFX",
+				re->nhe_id, &ere->p);
+
+			early_route_memory_free(ere);
+			return;
+		}
+	} else {
+		/* Lookup nhe from route information */
+		nhe = zebra_nhg_rib_find_nhe(ere->re_nhe, ere->afi);
+		if (!nhe) {
+			char buf2[PREFIX_STRLEN] = "";
+
+			flog_err(
+				EC_ZEBRA_TABLE_LOOKUP_FAILED,
+				"Zebra failed to find or create a nexthop hash entry for %pFX%s%s",
+				&ere->p, ere->src_p_provided ? " from " : "",
+				ere->src_p_provided
+					? prefix2str(&ere->src_p, buf2,
+						     sizeof(buf2))
+					: "");
+
+			early_route_memory_free(ere);
+			return;
+		}
+	}
+
+	/*
+	 * Attach the re to the nhe's nexthop group.
+	 *
+	 * TODO: This will need to change when we start getting IDs from upper
+	 * level protocols, as the refcnt might be wrong, since it checks
+	 * if old_id != new_id.
+	 */
+	route_entry_update_nhe(re, nhe);
+
+	/* Make sure the prefix length is applied to the prefix. */
+	apply_mask(&ere->p);
+	if (ere->src_p_provided)
+		apply_mask_ipv6(&ere->src_p);
+
+	/* Set default distance by route type. */
+	if (re->distance == 0)
+		re->distance = route_distance(re->type);
+
+	/* Lookup route node.*/
+	rn = srcdest_rnode_get(table, &ere->p,
+			       ere->src_p_provided ? &ere->src_p : NULL);
+
+	/*
+	 * If the same type of route is installed, treat it as an implicit
+	 * withdraw. If the user has specified no-route-replace semantics
+	 * for the install, don't do a route replace.
+	 */
+	RNODE_FOREACH_RE (rn, same) {
+		if (CHECK_FLAG(same->status, ROUTE_ENTRY_REMOVED)) {
+			same_count++;
+			continue;
+		}
+
+		/* Compare various route_entry properties */
+		if (rib_compare_routes(re, same)) {
+			same_count++;
+
+			if (first_same == NULL)
+				first_same = same;
+		}
+	}
+
+	same = first_same;
+
+	if (!ere->startup && (re->flags & ZEBRA_FLAG_SELFROUTE) &&
+	    zrouter.asic_offloaded) {
+		if (!same) {
+			if (IS_ZEBRA_DEBUG_RIB)
+				zlog_debug(
+					"prefix: %pRN is a self route where we do not have an entry for it. Dropping this update, it's useless",
+					rn);
+			/*
+			 * We are not on startup, this is a self route
+			 * and we have asic offload. Which means
+			 * we are getting a callback for an entry
+			 * that was already deleted from the kernel,
+			 * but an earlier response was just handed
+			 * back. Drop it on the floor
+			 */
+			early_route_memory_free(ere);
+			return;
+		}
+	}
+
+	/* If this route is kernel/connected route, notify the dataplane. */
+	if (RIB_SYSTEM_ROUTE(re)) {
+		/* Notify dataplane */
+		dplane_sys_route_add(rn, re);
+	}
+
+	/* Link new re to node.*/
+	if (IS_ZEBRA_DEBUG_RIB) {
+		rnode_debug(
+			rn, re->vrf_id,
+			"Inserting route rn %p, re %p (%s) existing %p, same_count %d",
+			rn, re, zebra_route_string(re->type), same, same_count);
+
+		if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+			route_entry_dump(
+				&ere->p,
+				ere->src_p_provided ? &ere->src_p : NULL, re);
+	}
+
+	SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
+	rib_addnode(rn, re, 1);
+
+	/* Free implicit route.*/
+	if (same)
+		rib_delnode(rn, same);
+
+	/* See if we can remove some RE entries that are queued for
+	 * removal, but won't be considered in rib processing.
+	 */
+	dest = rib_dest_from_rnode(rn);
+	RNODE_FOREACH_RE_SAFE (rn, re, same) {
+		if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED)) {
+			/* If the route was used earlier, must retain it. */
+			if (dest && re == dest->selected_fib)
+				continue;
+
+			if (IS_ZEBRA_DEBUG_RIB)
+				rnode_debug(rn, re->vrf_id,
+					    "rn %p, removing unneeded re %p",
+					    rn, re);
+
+			rib_unlink(rn, re);
+		}
+	}
+
+	route_unlock_node(rn);
+	if (ere->re_nhe)
+		zebra_nhg_free(ere->re_nhe);
+	XFREE(MTYPE_WQ_WRAPPER, ere);
+}
+
+static void process_subq_early_route_delete(struct zebra_early_route *ere)
+{
+	struct route_table *table;
+	struct route_node *rn;
+	struct route_entry *re;
+	struct route_entry *fib = NULL;
+	struct route_entry *same = NULL;
+	struct nexthop *rtnh;
+	char buf2[INET6_ADDRSTRLEN];
+	rib_dest_t *dest;
+
+	if (ere->src_p_provided)
+		assert(!ere->src_p.prefixlen || ere->afi == AFI_IP6);
+
+	/* Lookup table. */
+	table = zebra_vrf_lookup_table_with_table_id(
+		ere->afi, ere->safi, ere->re->vrf_id, ere->re->table);
+	if (!table) {
+		early_route_memory_free(ere);
+		return;
+	}
+
+	/* Apply mask. */
+	apply_mask(&ere->p);
+	if (ere->src_p_provided)
+		apply_mask_ipv6(&ere->src_p);
+
+	/* Lookup route node. */
+	rn = srcdest_rnode_lookup(table, &ere->p,
+				  ere->src_p_provided ? &ere->src_p : NULL);
+	if (!rn) {
+		if (IS_ZEBRA_DEBUG_RIB) {
+			char src_buf[PREFIX_STRLEN];
+			struct vrf *vrf = vrf_lookup_by_id(ere->re->vrf_id);
+
+			if (ere->src_p_provided && ere->src_p.prefixlen)
+				prefix2str(&ere->src_p, src_buf,
+					   sizeof(src_buf));
+			else
+				src_buf[0] = '\0';
+
+			zlog_debug("%s[%d]:%pRN%s%s doesn't exist in rib",
+				   vrf->name, ere->re->table, rn,
+				   (src_buf[0] != '\0') ? " from " : "",
+				   src_buf);
+		}
+		early_route_memory_free(ere);
+		return;
+	}
+
+	dest = rib_dest_from_rnode(rn);
+	fib = dest->selected_fib;
+
+	struct nexthop *nh = NULL;
+
+	if (ere->re->nhe)
+		nh = ere->re->nhe->nhg.nexthop;
+
+	/* Lookup same type route. */
+	RNODE_FOREACH_RE (rn, re) {
+		if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED))
+			continue;
+
+		if (re->type != ere->re->type)
+			continue;
+		if (re->instance != ere->re->instance)
+			continue;
+		if (CHECK_FLAG(re->flags, ZEBRA_FLAG_RR_USE_DISTANCE) &&
+		    ere->re->distance != re->distance)
+			continue;
+
+		if (re->type == ZEBRA_ROUTE_KERNEL &&
+		    re->metric != ere->re->metric)
+			continue;
+		if (re->type == ZEBRA_ROUTE_CONNECT &&
+		    (rtnh = re->nhe->nhg.nexthop) &&
+		    rtnh->type == NEXTHOP_TYPE_IFINDEX && nh) {
+			if (rtnh->ifindex != nh->ifindex)
+				continue;
+			same = re;
+			break;
+		}
+
+		/* Make sure that the route found has the same gateway. */
+		if (ere->re->nhe_id && re->nhe_id == ere->re->nhe_id) {
+			same = re;
+			break;
+		}
+
+		if (nh == NULL) {
+			same = re;
+			break;
+		}
+		for (ALL_NEXTHOPS(re->nhe->nhg, rtnh)) {
+			/*
+			 * No guarantee the kernel sends nexthops
+			 * with labels on delete.
+			 */
+			if (nexthop_same_no_labels(rtnh, nh)) {
+				same = re;
+				break;
+			}
+		}
+
+		if (same)
+			break;
+	}
+	/*
+	 * If the same type of route can't be found and this message is
+	 * from the kernel.
+	 */
+	if (!same) {
+		/*
+		 * In the past(HA!) we could get here because
+		 * we were receiving a route delete from the
+		 * kernel and we're not marking the proto
+		 * as coming from its appropriate originator.
+		 * Now that we are properly noticing the fact
+		 * that the kernel has deleted our route we
+		 * are not going to get called in this path
+		 * I am going to leave this here because
+		 * this might still work this way on non-linux
+		 * platforms as well as some weird state I have
+		 * not properly thought of yet.
+		 * If we can show that this code path is
+		 * dead then we can remove it.
+		 */
+		if (fib && CHECK_FLAG(ere->re->flags, ZEBRA_FLAG_SELFROUTE)) {
+			if (IS_ZEBRA_DEBUG_RIB) {
+				rnode_debug(
+					rn, ere->re->vrf_id,
+					"rn %p, re %p (%s) was deleted from kernel, adding",
+					rn, fib, zebra_route_string(fib->type));
+			}
+			if (zrouter.allow_delete ||
+			    CHECK_FLAG(dest->flags, RIB_ROUTE_ANY_QUEUED)) {
+				UNSET_FLAG(fib->status, ROUTE_ENTRY_INSTALLED);
+				/* Unset flags. */
+				for (rtnh = fib->nhe->nhg.nexthop; rtnh;
+				     rtnh = rtnh->next)
+					UNSET_FLAG(rtnh->flags,
+						   NEXTHOP_FLAG_FIB);
+
+				/*
+				 * This is a non FRR route
+				 * as such we should mark
+				 * it as deleted
+				 */
+				dest->selected_fib = NULL;
+			} else {
+				/*
+				 * This means someone else, other than Zebra,
+				 * has deleted a Zebra route from the kernel.
+				 * We will add it back
+				 */
+				rib_install_kernel(rn, fib, NULL);
+			}
+		} else {
+			if (IS_ZEBRA_DEBUG_RIB) {
+				if (nh)
+					rnode_debug(
+						rn, ere->re->vrf_id,
+						"via %s ifindex %d type %d doesn't exist in rib",
+						inet_ntop(afi2family(ere->afi),
+							  &nh->gate, buf2,
+							  sizeof(buf2)),
+						nh->ifindex, ere->re->type);
+				else
+					rnode_debug(
+						rn, ere->re->vrf_id,
+						"type %d doesn't exist in rib",
+						ere->re->type);
+			}
+			route_unlock_node(rn);
+			early_route_memory_free(ere);
+			return;
+		}
+	}
+
+	if (same) {
+		struct nexthop *tmp_nh;
+
+		if (ere->fromkernel &&
+		    CHECK_FLAG(ere->re->flags, ZEBRA_FLAG_SELFROUTE) &&
+		    !zrouter.allow_delete) {
+			rib_install_kernel(rn, same, NULL);
+			route_unlock_node(rn);
+
+			early_route_memory_free(ere);
+			return;
+		}
+
+		/* Special handling for IPv4 or IPv6 routes sourced from
+		 * EVPN - the nexthop (and associated MAC) need to be
+		 * uninstalled if no more refs.
+		 */
+		for (ALL_NEXTHOPS(re->nhe->nhg, tmp_nh)) {
+			struct ipaddr vtep_ip;
+
+			if (CHECK_FLAG(tmp_nh->flags, NEXTHOP_FLAG_EVPN)) {
+				memset(&vtep_ip, 0, sizeof(struct ipaddr));
+				if (ere->afi == AFI_IP) {
+					vtep_ip.ipa_type = IPADDR_V4;
+					memcpy(&(vtep_ip.ipaddr_v4),
+					       &(tmp_nh->gate.ipv4),
+					       sizeof(struct in_addr));
+				} else {
+					vtep_ip.ipa_type = IPADDR_V6;
+					memcpy(&(vtep_ip.ipaddr_v6),
+					       &(tmp_nh->gate.ipv6),
+					       sizeof(struct in6_addr));
+				}
+				zebra_rib_queue_evpn_route_del(
+					re->vrf_id, &vtep_ip, &ere->p);
+			}
+		}
+
+		/* Notify dplane if system route changes */
+		if (RIB_SYSTEM_ROUTE(re))
+			dplane_sys_route_del(rn, same);
+
+		rib_delnode(rn, same);
+	}
+
+	route_unlock_node(rn);
+
+	early_route_memory_free(ere);
+}
+
+/*
+ * When FRR receives a route we need to match it up to a nexthop
+ * group that we may also have just received. Place the data on
+ * this queue so that the work of finding the nexthop group entry
+ * for the route entry is always done after the nexthop group has
+ * had a chance to be processed
+ */
+static void process_subq_early_route(struct listnode *lnode)
+{
+	struct zebra_early_route *ere = listgetdata(lnode);
+
+	if (ere->deletion)
+		process_subq_early_route_delete(ere);
+	else
+		process_subq_early_route_add(ere);
+}
+
 /*
  * Examine the specified subqueue; process one entry and return 1 if
  * there is a node, return 0 otherwise.
@@ -2525,6 +3035,12 @@ static unsigned int process_subq(struct list *subq,
 	case META_QUEUE_NHG:
 		process_subq_nhg(lnode);
 		break;
+	case META_QUEUE_EARLY_ROUTE:
+		process_subq_early_route(lnode);
+		break;
+	case META_QUEUE_EARLY_LABEL:
+		process_subq_early_label(lnode);
+		break;
 	case META_QUEUE_CONNECTED:
 	case META_QUEUE_KERNEL:
 	case META_QUEUE_STATIC:
@@ -2555,8 +3071,9 @@ static wq_item_status meta_queue_process(struct work_queue *dummy, void *data)
 	queue_len = dplane_get_in_queue_len();
 	if (queue_len > queue_limit) {
 		if (IS_ZEBRA_DEBUG_RIB_DETAILED)
-			zlog_debug("rib queue: dplane queue len %u, limit %u, retrying",
-				   queue_len, queue_limit);
+			zlog_debug(
+				"rib queue: dplane queue len %u, limit %u, retrying",
+				queue_len, queue_limit);
 
 		/* Ensure that the meta-queue is actually enqueued */
 		if (work_queue_empty(zrouter.ribq))
@@ -2635,6 +3152,13 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
 	return 0;
 }
 
+static int early_label_meta_queue_add(struct meta_queue *mq, void *data)
+{
+	listnode_add(mq->subq[META_QUEUE_EARLY_LABEL], data);
+	mq->size++;
+	return 0;
+}
+
 static int rib_meta_queue_nhg_ctx_add(struct meta_queue *mq, void *data)
 {
 	struct nhg_ctx *ctx = NULL;
@@ -2718,6 +3242,44 @@ static int mq_add_handler(void *data,
 	return mq_add_func(zrouter.mq, data);
 }
 
+void mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
+			struct prefix *prefix, uint8_t route_type,
+			uint8_t route_instance)
+{
+	struct wq_label_wrapper *w;
+
+	w = XCALLOC(MTYPE_WQ_WRAPPER, sizeof(struct wq_label_wrapper));
+
+	w->type = WQ_LABEL_FTN_UNINSTALL;
+	w->vrf_id = zvrf->vrf->vrf_id;
+	w->p = *prefix;
+	w->ltype = type;
+	w->route_type = route_type;
+	w->route_instance = route_instance;
+
+	if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+		zlog_debug("Early Label Handling for %pFX", prefix);
+
+	mq_add_handler(w, early_label_meta_queue_add);
+}
+
+void mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
+			      const struct zapi_labels *zl)
+{
+	struct wq_label_wrapper *w;
+
+	w = XCALLOC(MTYPE_WQ_WRAPPER, sizeof(struct wq_label_wrapper));
+	w->type = WQ_LABEL_LABELS_PROCESS;
+	w->vrf_id = zvrf->vrf->vrf_id;
+	w->add_p = add_p;
+	w->zl = *zl;
+
+	if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+		zlog_debug("Early Label Handling: Labels Process");
+
+	mq_add_handler(w, early_label_meta_queue_add);
+}
+
 /* Add route_node to work queue and schedule processing */
 int rib_queue_add(struct route_node *rn)
 {
@@ -2958,7 +3520,6 @@ int zebra_rib_queue_evpn_rem_vtep_del(vrf_id_t vrf_id, vni_t vni,
 	return mq_add_handler(w, rib_meta_queue_evpn_add);
 }
 
-
 /* Create new meta queue.
    A destructor function doesn't seem to be necessary here.
  */
@@ -3034,6 +3595,29 @@ static void nhg_meta_queue_free(struct meta_queue *mq, struct list *l,
 	}
 }
 
+static void early_label_meta_queue_free(struct meta_queue *mq, struct list *l,
+					struct zebra_vrf *zvrf)
+{
+	struct wq_label_wrapper *w;
+	struct listnode *node, *nnode;
+
+	for (ALL_LIST_ELEMENTS(l, node, nnode, w)) {
+		if (zvrf && zvrf->vrf->vrf_id != w->vrf_id)
+			continue;
+
+		switch (w->type) {
+		case WQ_LABEL_FTN_UNINSTALL:
+		case WQ_LABEL_LABELS_PROCESS:
+			break;
+		}
+
+		node->data = NULL;
+		XFREE(MTYPE_WQ_WRAPPER, w);
+		list_delete_node(l, node);
+		mq->size--;
+	}
+}
+
 static void rib_meta_queue_free(struct meta_queue *mq, struct list *l,
 				struct zebra_vrf *zvrf)
 {
@@ -3053,6 +3637,22 @@ static void rib_meta_queue_free(struct meta_queue *mq, struct list *l,
 	}
 }
 
+static void early_route_meta_queue_free(struct meta_queue *mq, struct list *l,
+					struct zebra_vrf *zvrf)
+{
+	struct zebra_early_route *zer;
+	struct listnode *node, *nnode;
+
+	for (ALL_LIST_ELEMENTS(l, node, nnode, zer)) {
+		if (zvrf && zer->re->vrf_id != zvrf->vrf->vrf_id)
+			continue;
+
+		early_route_memory_free(zer);
+		node->data = NULL;
+		list_delete_node(l, node);
+		mq->size--;
+	}
+}
 
 void meta_queue_free(struct meta_queue *mq, struct zebra_vrf *zvrf)
 {
@@ -3067,6 +3667,12 @@ void meta_queue_free(struct meta_queue *mq, struct zebra_vrf *zvrf)
 		case META_QUEUE_EVPN:
 			evpn_meta_queue_free(mq, mq->subq[i], zvrf);
 			break;
+		case META_QUEUE_EARLY_ROUTE:
+			early_route_meta_queue_free(mq, mq->subq[i], zvrf);
+			break;
+		case META_QUEUE_EARLY_LABEL:
+			early_label_meta_queue_free(mq, mq->subq[i], zvrf);
+			break;
 		case META_QUEUE_CONNECTED:
 		case META_QUEUE_KERNEL:
 		case META_QUEUE_STATIC:
@@ -3210,17 +3816,6 @@ static void rib_addnode(struct route_node *rn,
 	rib_link(rn, re, process);
 }
 
-static void rib_re_nhg_free(struct route_entry *re)
-{
-	if (re->nhe && re->nhe_id) {
-		assert(re->nhe->id == re->nhe_id);
-		route_entry_update_nhe(re, NULL);
-	} else if (re->nhe && re->nhe->nhg.nexthop)
-		nexthops_free(re->nhe->nhg.nexthop);
-
-	nexthops_free(re->fib_ng.nexthop);
-}
-
 /*
  * rib_unlink
  *
@@ -3426,6 +4021,46 @@ void _route_entry_dump(const char *func, union prefixconstptr pp,
 	zlog_debug("%s: dump complete", straddr);
 }
 
+static int rib_meta_queue_early_route_add(struct meta_queue *mq, void *data)
+{
+	struct zebra_early_route *ere = data;
+
+	listnode_add(mq->subq[META_QUEUE_EARLY_ROUTE], data);
+	mq->size++;
+
+	if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+		zlog_debug(
+			"Route %pFX(%u) queued for processing into sub-queue %s",
+			&ere->p, ere->re->vrf_id,
+			subqueue2str(META_QUEUE_EARLY_ROUTE));
+
+	return 0;
+}
+
+struct route_entry *zebra_rib_route_entry_new(vrf_id_t vrf_id, int type,
+					      uint8_t instance, uint32_t flags,
+					      uint32_t nhe_id,
+					      uint32_t table_id,
+					      uint32_t metric, uint32_t mtu,
+					      uint8_t distance, route_tag_t tag)
+{
+	struct route_entry *re;
+
+	re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
+	re->type = type;
+	re->instance = instance;
+	re->distance = distance;
+	re->flags = flags;
+	re->metric = metric;
+	re->mtu = mtu;
+	re->table = table_id;
+
re->vrf_id = vrf_id; + re->uptime = monotime(NULL); + re->tag = tag; + re->nhe_id = nhe_id; + + return re; +} /* * Internal route-add implementation; there are a couple of different public * signatures. Callers in this path are responsible for the memory they @@ -3441,162 +4076,25 @@ int rib_add_multipath_nhe(afi_t afi, safi_t safi, struct prefix *p, struct prefix_ipv6 *src_p, struct route_entry *re, struct nhg_hash_entry *re_nhe, bool startup) { - struct nhg_hash_entry *nhe = NULL; - struct route_table *table; - struct route_node *rn; - struct route_entry *same = NULL, *first_same = NULL; - int ret = 0; - int same_count = 0; - rib_dest_t *dest; + struct zebra_early_route *ere; - if (!re || !re_nhe) + if (!re) return -1; assert(!src_p || !src_p->prefixlen || afi == AFI_IP6); - /* Lookup table. */ - table = zebra_vrf_get_table_with_table_id(afi, safi, re->vrf_id, - re->table); - if (!table) - return -1; - - if (re->nhe_id > 0) { - nhe = zebra_nhg_lookup_id(re->nhe_id); - - if (!nhe) { - flog_err( - EC_ZEBRA_TABLE_LOOKUP_FAILED, - "Zebra failed to find the nexthop hash entry for id=%u in a route entry", - re->nhe_id); - - return -1; - } - } else { - /* Lookup nhe from route information */ - nhe = zebra_nhg_rib_find_nhe(re_nhe, afi); - if (!nhe) { - char buf2[PREFIX_STRLEN] = ""; - - flog_err( - EC_ZEBRA_TABLE_LOOKUP_FAILED, - "Zebra failed to find or create a nexthop hash entry for %pFX%s%s", - p, src_p ? " from " : "", - src_p ? prefix2str(src_p, buf2, sizeof(buf2)) - : ""); - - return -1; - } - } - - /* - * Attach the re to the nhe's nexthop group. - * - * TODO: This will need to change when we start getting IDs from upper - * level protocols, as the refcnt might be wrong, since it checks - * if old_id != new_id. - */ - route_entry_update_nhe(re, nhe); - - /* Make it sure prefixlen is applied to the prefix. */ - apply_mask(p); + ere = XCALLOC(MTYPE_WQ_WRAPPER, sizeof(*ere)); + ere->afi = afi; + ere->safi = safi; + ere->p = *p; if (src_p) - apply_mask_ipv6(src_p); - - /* Set default distance by route type. */ - if (re->distance == 0) - re->distance = route_distance(re->type); - - /* Lookup route node.*/ - rn = srcdest_rnode_get(table, p, src_p); - - /* - * If same type of route are installed, treat it as a implicit - * withdraw. If the user has specified the No route replace semantics - * for the install don't do a route replace. - */ - RNODE_FOREACH_RE (rn, same) { - if (CHECK_FLAG(same->status, ROUTE_ENTRY_REMOVED)) { - same_count++; - continue; - } - - /* Compare various route_entry properties */ - if (rib_compare_routes(re, same)) { - same_count++; - - if (first_same == NULL) - first_same = same; - } - } - - same = first_same; - - if (!startup && - (re->flags & ZEBRA_FLAG_SELFROUTE) && zrouter.asic_offloaded) { - if (!same) { - if (IS_ZEBRA_DEBUG_RIB) - zlog_debug("prefix: %pRN is a self route where we do not have an entry for it. Dropping this update, it's useless", rn); - /* - * We are not on startup, this is a self route - * and we have asic offload. Which means - * we are getting a callback for a entry - * that was already deleted to the kernel - * but an earlier response was just handed - * back. Drop it on the floor - */ - rib_re_nhg_free(re); - - XFREE(MTYPE_RE, re); - return ret; - } - } - - /* If this route is kernel/connected route, notify the dataplane. 
*/ - if (RIB_SYSTEM_ROUTE(re)) { - /* Notify dataplane */ - dplane_sys_route_add(rn, re); - } - - /* Link new re to node.*/ - if (IS_ZEBRA_DEBUG_RIB) { - rnode_debug(rn, re->vrf_id, - "Inserting route rn %p, re %p (%s) existing %p, same_count %d", - rn, re, zebra_route_string(re->type), same, - same_count); - - if (IS_ZEBRA_DEBUG_RIB_DETAILED) - route_entry_dump(p, src_p, re); - } - - SET_FLAG(re->status, ROUTE_ENTRY_CHANGED); - rib_addnode(rn, re, 1); - - /* Free implicit route.*/ - if (same) { - ret = 1; - rib_delnode(rn, same); - } - - /* See if we can remove some RE entries that are queued for - * removal, but won't be considered in rib processing. - */ - dest = rib_dest_from_rnode(rn); - RNODE_FOREACH_RE_SAFE (rn, re, same) { - if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED)) { - /* If the route was used earlier, must retain it. */ - if (dest && re == dest->selected_fib) - continue; - - if (IS_ZEBRA_DEBUG_RIB) - rnode_debug(rn, re->vrf_id, "rn %p, removing unneeded re %p", - rn, re); + ere->src_p = *src_p; + ere->src_p_provided = !!src_p; + ere->re = re; + ere->re_nhe = re_nhe; + ere->startup = startup; - rib_unlink(rn, re); - } - } - - route_unlock_node(rn); - return ret; + return mq_add_handler(ere, rib_meta_queue_early_route_add); } /* @@ -3607,7 +4105,7 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, struct nexthop_group *ng, bool startup) { int ret; - struct nhg_hash_entry nhe; + struct nhg_hash_entry nhe, *n; if (!re) return -1; @@ -3625,10 +4123,8 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, else if (re->nhe_id > 0) nhe.id = re->nhe_id; - ret = rib_add_multipath_nhe(afi, safi, p, src_p, re, &nhe, startup); - - /* In this path, the callers expect memory to be freed. */ - nexthop_group_delete(&ng); + n = zebra_nhe_copy(&nhe, 0); + ret = rib_add_multipath_nhe(afi, safi, p, src_p, re, n, startup); /* In error cases, free the route also */ if (ret < 0) @@ -3643,212 +4139,32 @@ void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type, uint32_t nhe_id, uint32_t table_id, uint32_t metric, uint8_t distance, bool fromkernel) { - struct route_table *table; - struct route_node *rn; - struct route_entry *re; - struct route_entry *fib = NULL; - struct route_entry *same = NULL; - struct nexthop *rtnh; - char buf2[INET6_ADDRSTRLEN]; - rib_dest_t *dest; - - assert(!src_p || !src_p->prefixlen || afi == AFI_IP6); - - /* Lookup table. */ - table = zebra_vrf_lookup_table_with_table_id(afi, safi, vrf_id, - table_id); - if (!table) - return; - - /* Apply mask. */ - apply_mask(p); - if (src_p) - apply_mask_ipv6(src_p); - - /* Lookup route node. */ - rn = srcdest_rnode_lookup(table, p, src_p); - if (!rn) { - if (IS_ZEBRA_DEBUG_RIB) { - char src_buf[PREFIX_STRLEN]; - struct vrf *vrf = vrf_lookup_by_id(vrf_id); - - if (src_p && src_p->prefixlen) - prefix2str(src_p, src_buf, sizeof(src_buf)); - else - src_buf[0] = '\0'; - - zlog_debug("%s[%d]:%pRN%s%s doesn't exist in rib", - vrf->name, table_id, rn, - (src_buf[0] != '\0') ? " from " : "", - src_buf); - } - return; - } - - dest = rib_dest_from_rnode(rn); - fib = dest->selected_fib; - - /* Lookup same type route. 
*/ - RNODE_FOREACH_RE (rn, re) { - if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED)) - continue; - - if (re->type != type) - continue; - if (re->instance != instance) - continue; - if (CHECK_FLAG(re->flags, ZEBRA_FLAG_RR_USE_DISTANCE) && - distance != re->distance) - continue; - - if (re->type == ZEBRA_ROUTE_KERNEL && re->metric != metric) - continue; - if (re->type == ZEBRA_ROUTE_CONNECT && - (rtnh = re->nhe->nhg.nexthop) - && rtnh->type == NEXTHOP_TYPE_IFINDEX && nh) { - if (rtnh->ifindex != nh->ifindex) - continue; - same = re; - break; - } - - /* Make sure that the route found has the same gateway. */ - if (nhe_id && re->nhe_id == nhe_id) { - same = re; - break; - } - - if (nh == NULL) { - same = re; - break; - } - for (ALL_NEXTHOPS(re->nhe->nhg, rtnh)) { - /* - * No guarantee all kernel send nh with labels - * on delete. - */ - if (nexthop_same_no_labels(rtnh, nh)) { - same = re; - break; - } - } - - if (same) - break; - } - /* If same type of route can't be found and this message is from - kernel. */ - if (!same) { - /* - * In the past(HA!) we could get here because - * we were receiving a route delete from the - * kernel and we're not marking the proto - * as coming from it's appropriate originator. - * Now that we are properly noticing the fact - * that the kernel has deleted our route we - * are not going to get called in this path - * I am going to leave this here because - * this might still work this way on non-linux - * platforms as well as some weird state I have - * not properly thought of yet. - * If we can show that this code path is - * dead then we can remove it. - */ - if (fib && CHECK_FLAG(flags, ZEBRA_FLAG_SELFROUTE)) { - if (IS_ZEBRA_DEBUG_RIB) { - rnode_debug(rn, vrf_id, - "rn %p, re %p (%s) was deleted from kernel, adding", - rn, fib, - zebra_route_string(fib->type)); - } - if (zrouter.allow_delete || - CHECK_FLAG(dest->flags, RIB_ROUTE_ANY_QUEUED)) { - UNSET_FLAG(fib->status, ROUTE_ENTRY_INSTALLED); - /* Unset flags. */ - for (rtnh = fib->nhe->nhg.nexthop; rtnh; - rtnh = rtnh->next) - UNSET_FLAG(rtnh->flags, - NEXTHOP_FLAG_FIB); - - /* - * This is a non FRR route - * as such we should mark - * it as deleted - */ - dest->selected_fib = NULL; - } else { - /* This means someone else, other than Zebra, - * has deleted - * a Zebra router from the kernel. We will add - * it back */ - rib_install_kernel(rn, fib, NULL); - } - } else { - if (IS_ZEBRA_DEBUG_RIB) { - if (nh) - rnode_debug( - rn, vrf_id, - "via %s ifindex %d type %d doesn't exist in rib", - inet_ntop(afi2family(afi), - &nh->gate, buf2, - sizeof(buf2)), - nh->ifindex, type); - else - rnode_debug( - rn, vrf_id, - "type %d doesn't exist in rib", - type); - } - route_unlock_node(rn); - return; - } - } - - if (same) { - struct nexthop *tmp_nh; - - if (fromkernel && CHECK_FLAG(flags, ZEBRA_FLAG_SELFROUTE) && - !zrouter.allow_delete) { - rib_install_kernel(rn, same, NULL); - route_unlock_node(rn); - - return; - } - - /* Special handling for IPv4 or IPv6 routes sourced from - * EVPN - the nexthop (and associated MAC) need to be - * uninstalled if no more refs. 
- */ - for (ALL_NEXTHOPS(re->nhe->nhg, tmp_nh)) { - struct ipaddr vtep_ip; - - if (CHECK_FLAG(tmp_nh->flags, NEXTHOP_FLAG_EVPN)) { - memset(&vtep_ip, 0, sizeof(struct ipaddr)); - if (afi == AFI_IP) { - vtep_ip.ipa_type = IPADDR_V4; - memcpy(&(vtep_ip.ipaddr_v4), - &(tmp_nh->gate.ipv4), - sizeof(struct in_addr)); - } else { - vtep_ip.ipa_type = IPADDR_V6; - memcpy(&(vtep_ip.ipaddr_v6), - &(tmp_nh->gate.ipv6), - sizeof(struct in6_addr)); - } - zebra_rib_queue_evpn_route_del(re->vrf_id, - &vtep_ip, p); - } - } + struct zebra_early_route *ere; + struct route_entry *re = NULL; + struct nhg_hash_entry *nhe = NULL; - /* Notify dplane if system route changes */ - if (RIB_SYSTEM_ROUTE(re)) - dplane_sys_route_del(rn, same); + re = zebra_rib_route_entry_new(vrf_id, type, instance, flags, nhe_id, + table_id, metric, 0, distance, 0); - rib_delnode(rn, same); + if (nh) { + nhe = zebra_nhg_alloc(); + nhe->nhg.nexthop = nexthop_dup(nh, NULL); } - route_unlock_node(rn); - return; + ere = XCALLOC(MTYPE_WQ_WRAPPER, sizeof(*ere)); + ere->afi = afi; + ere->safi = safi; + ere->p = *p; + if (src_p) + ere->src_p = *src_p; + ere->src_p_provided = !!src_p; + ere->re = re; + ere->re_nhe = nhe; + ere->startup = false; + ere->deletion = true; + ere->fromkernel = fromkernel; + + mq_add_handler(ere, rib_meta_queue_early_route_add); } @@ -3859,36 +4175,23 @@ int rib_add(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type, uint8_t distance, route_tag_t tag, bool startup) { struct route_entry *re = NULL; - struct nexthop *nexthop = NULL; - struct nexthop_group *ng = NULL; + struct nexthop nexthop = {}; + struct nexthop_group ng = {}; /* Allocate new route_entry structure. */ - re = XCALLOC(MTYPE_RE, sizeof(struct route_entry)); - re->type = type; - re->instance = instance; - re->distance = distance; - re->flags = flags; - re->metric = metric; - re->mtu = mtu; - re->table = table_id; - re->vrf_id = vrf_id; - re->uptime = monotime(NULL); - re->tag = tag; - re->nhe_id = nhe_id; + re = zebra_rib_route_entry_new(vrf_id, type, instance, flags, nhe_id, + table_id, metric, mtu, distance, tag); /* If the owner of the route supplies a shared nexthop-group id, * we'll use that. Otherwise, pass the nexthop along directly. */ if (!nhe_id) { - ng = nexthop_group_new(); - /* Add nexthop. 
*/
-		nexthop = nexthop_new();
-		*nexthop = *nh;
-		nexthop_group_add_sorted(ng, nexthop);
+		nexthop = *nh;
+		nexthop_group_add_sorted(&ng, &nexthop);
 	}
 
-	return rib_add_multipath(afi, safi, p, src_p, re, ng, startup);
+	return rib_add_multipath(afi, safi, p, src_p, re, &ng, startup);
 }
 
 static const char *rib_update_event2str(enum rib_update_event event)
diff --git a/zebra/zebra_script.c b/zebra/zebra_script.c
index d247f87708..2e2f4159cd 100644
--- a/zebra/zebra_script.c
+++ b/zebra/zebra_script.c
@@ -329,14 +329,6 @@ void lua_pushzebra_dplane_ctx(lua_State *L, const struct zebra_dplane_ctx *ctx)
 			lua_setfield(L, -2, "ipset");
 			break;
 		}
-	case DPLANE_OP_ADDR_INSTALL:
-	case DPLANE_OP_ADDR_UNINSTALL:
-	case DPLANE_OP_INTF_ADDR_ADD:
-	case DPLANE_OP_INTF_ADDR_DEL:
-	case DPLANE_OP_INTF_INSTALL:
-	case DPLANE_OP_INTF_UPDATE:
-	case DPLANE_OP_INTF_DELETE:
-		break;
 	case DPLANE_OP_NEIGH_INSTALL:
 	case DPLANE_OP_NEIGH_UPDATE:
 	case DPLANE_OP_NEIGH_DELETE:
@@ -418,6 +410,17 @@ void lua_pushzebra_dplane_ctx(lua_State *L, const struct zebra_dplane_ctx *ctx)
 		}
 		lua_setfield(L, -2, "gre");
 
+	case DPLANE_OP_ADDR_INSTALL:
+	case DPLANE_OP_ADDR_UNINSTALL:
+	case DPLANE_OP_INTF_ADDR_ADD:
+	case DPLANE_OP_INTF_ADDR_DEL:
+	case DPLANE_OP_INTF_INSTALL:
+	case DPLANE_OP_INTF_UPDATE:
+	case DPLANE_OP_INTF_DELETE:
+	case DPLANE_OP_TC_INSTALL:
+	case DPLANE_OP_TC_UPDATE:
+	case DPLANE_OP_TC_DELETE:
+		/* Not currently handled */
 	case DPLANE_OP_INTF_NETCONFIG: /*NYI*/
 	case DPLANE_OP_NONE:
 		break;
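
Note on the ordering contract the two new subqueues depend on: the meta queue is drained strictly in subqueue-index order, so a nexthop group already sitting on its subqueue is always processed before an early route queued behind it, regardless of arrival order within a pass. Below is a self-contained sketch of that contract, not FRR code; all names are illustrative, and it assumes (as the comment above process_subq_early_route() describes) that the NHG subqueue index sorts ahead of the early-route index.

/*
 * Standalone sketch (illustrative names, not FRR's implementation) of
 * the drain-lowest-subqueue-first contract: a queued nexthop group is
 * always handled before an early route that references it.
 */
#include <stdio.h>
#include <stdlib.h>

enum subq_index { SUBQ_NHG, SUBQ_EARLY_ROUTE, SUBQ_MAX };

struct work {
	char desc[40];
	struct work *next;
};

struct meta_queue_sketch {
	struct work *head[SUBQ_MAX];
	struct work *tail[SUBQ_MAX];
};

static void mq_enqueue(struct meta_queue_sketch *mq, enum subq_index q,
		       const char *desc)
{
	struct work *w = calloc(1, sizeof(*w));

	snprintf(w->desc, sizeof(w->desc), "%s", desc);
	if (mq->tail[q])
		mq->tail[q]->next = w;
	else
		mq->head[q] = w;
	mq->tail[q] = w;
}

/* Drain one item from the lowest-indexed non-empty subqueue;
 * returns 0 once every subqueue is empty. */
static int mq_process_one(struct meta_queue_sketch *mq)
{
	for (int q = 0; q < SUBQ_MAX; q++) {
		struct work *w = mq->head[q];

		if (!w)
			continue;
		mq->head[q] = w->next;
		if (!mq->head[q])
			mq->tail[q] = NULL;
		printf("subq %d: %s\n", q, w->desc);
		free(w);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct meta_queue_sketch mq = { { 0 } };

	/* A route arrives referencing nhe id 5 before the NHG itself... */
	mq_enqueue(&mq, SUBQ_EARLY_ROUTE, "route 10.0.0.0/24 (nhe 5)");
	mq_enqueue(&mq, SUBQ_NHG, "nhg 5");

	/* ...yet "nhg 5" is still drained and printed first. */
	while (mq_process_one(&mq))
		;
	return 0;
}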

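A related ownership point: rib_add_multipath() now enqueues a heap copy of the nhg_hash_entry (via zebra_nhe_copy()) and rib_add() builds its nexthop on the stack, because the queued zebra_early_route outlives the caller's stack frame and must own everything it references. The following is a minimal sketch of that copy-on-enqueue pattern; all names are hypothetical, not FRR's API.

/*
 * Sketch (hypothetical names) of copy-on-enqueue ownership: the caller
 * builds its nexthop on the stack, the enqueue path deep-copies it, and
 * the drain path frees the copy. This mirrors why rib_add() can use a
 * stack `struct nexthop` while the queued work item carries its own
 * nhg_hash_entry copy.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct nh_sketch {
	unsigned int ifindex;
};

struct queued_route {
	struct nh_sketch *nh; /* owned by the queue item */
};

/* Deep-copy on enqueue: from here on, the queue item owns the memory. */
static struct queued_route *enqueue_route(const struct nh_sketch *stack_nh)
{
	struct queued_route *qr = calloc(1, sizeof(*qr));

	qr->nh = malloc(sizeof(*qr->nh));
	memcpy(qr->nh, stack_nh, sizeof(*qr->nh));
	return qr;
}

/* Later, on the work queue: use the copy, then free everything. */
static void process_route(struct queued_route *qr)
{
	printf("install via ifindex %u\n", qr->nh->ifindex);
	free(qr->nh);
	free(qr);
}

int main(void)
{
	struct queued_route *qr;

	{
		struct nh_sketch nh = { .ifindex = 3 }; /* caller's stack */

		qr = enqueue_route(&nh);
	} /* the caller's frame is gone here; the queued copy survives */

	process_route(qr);
	return 0;
}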