311 files changed, 15308 insertions, 3507 deletions
diff --git a/.github/workflows/build-test-docker.yml b/.github/workflows/build-test-docker.yml
index 3f53f32d3a..aef41d5733 100644
--- a/.github/workflows/build-test-docker.yml
+++ b/.github/workflows/build-test-docker.yml
@@ -12,8 +12,8 @@ defaults:
     shell: bash
 
 jobs:
-  build-docker:
-    name: Build the ubuntu 22.04 docker image
+  build-x86-docker:
+    name: Build the x86 ubuntu 22.04 docker image
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
@@ -22,32 +22,32 @@ jobs:
           fetch-depth: 1
       - name: Build docker image
         run: |
-          docker build -t frr-ubuntu22 -f docker/ubuntu-ci/Dockerfile .
-          docker save --output /tmp/frr-ubuntu22.tar frr-ubuntu22
+          docker build -t frr-x86-ubuntu22 -f docker/ubuntu-ci/Dockerfile .
+          docker save --output /tmp/frr-x86-ubuntu22.tar frr-x86-ubuntu22
       - name: Upload docker image artifact
         uses: actions/upload-artifact@v4
         with:
-          name: ubuntu-image
-          path: /tmp/frr-ubuntu22.tar
+          name: ubuntu-x86-image
+          path: /tmp/frr-x86-ubuntu22.tar
       - name: Clear any previous results
         # So if all jobs are re-run then all tests will be re-run
         run: |
-          rm -rf test-results*
-          mkdir -p test-results
-          touch test-results/cleared-results.txt
+          rm -rf test-results-x86*
+          mkdir -p test-results-x86
+          touch test-results-x86/cleared-results.txt
       - name: Save cleared previous results
         uses: actions/upload-artifact@v4
         with:
-          name: test-results
-          path: test-results
+          name: test-results-x86
+          path: test-results-x86
           overwrite: true
       - name: Cleanup
         if: ${{ always() }}
-        run: rm -rf test-results* /tmp/frr-ubuntu22.tar
+        run: rm -rf test-results-x86* /tmp/frr-x86-ubuntu22.tar
 
-  test-docker:
-    name: Test ubuntu docker image
-    needs: build-docker
+  test-x86-docker:
+    name: Test ubuntu x86 docker image
+    needs: build-x86-docker
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
@@ -57,14 +57,14 @@ jobs:
       - name: Fetch docker image artifact
         uses: actions/download-artifact@v4
         with:
-          name: ubuntu-image
+          name: ubuntu-x86-image
           path: /tmp
       - name: Fetch previous results
         if: ${{ github.run_attempt > 1 }}
         uses: actions/download-artifact@v4
         with:
-          name: test-results
-          path: test-results
+          name: test-results-x86
+          path: test-results-x86
       - name: Run topotests
         run: |
           uname -a
@@ -75,37 +75,37 @@ jobs:
           sudo modprobe vrf || true
           sudo modprobe mpls-iptunnel
           sudo modprobe mpls-router
-          docker load --input /tmp/frr-ubuntu22.tar
+          docker load --input /tmp/frr-x86-ubuntu22.tar
 
           if ! grep CONFIG_IP_MROUTE_MULTIPLE_TABLES=y /boot/config*; then
             ADD_DOCKER_ENV+="-e MROUTE_VRF_MISSING=1"
           fi
           echo "ADD_DOCKER_ENV: ${ADD_DOCKER_ENV}"
 
-          if [ -f test-results/topotests.xml ]; then
-            ./tests/topotests/analyze.py -r test-results
-            ls -l test-results/topotests.xml
-            run_tests=$(./tests/topotests/analyze.py -r test-results | cut -f1 -d: | sort -u)
+          if [ -f test-results-x86/topotests.xml ]; then
+            ./tests/topotests/analyze.py -r test-results-x86
+            ls -l test-results-x86/topotests.xml
+            run_tests=$(./tests/topotests/analyze.py -r test-results-x86 | cut -f1 -d: | sort -u)
           else
             echo "No test results dir"
            run_tests=""
          fi
-          rm -rf test-results* /tmp/topotests
+          rm -rf test-results-x86* /tmp/topotests
 
           echo RUN_TESTS: $run_tests
-          if docker run --init -i --privileged --name frr-ubuntu-cont ${ADD_DOCKER_ENV} -v /lib/modules:/lib/modules frr-ubuntu22 \
+          if docker run --init -i --privileged --name frr-ubuntu-cont ${ADD_DOCKER_ENV} -v /lib/modules:/lib/modules frr-x86-ubuntu22 \
             bash -c 'cd ~/frr/tests/topotests ; sudo -E pytest -n$(($(nproc) * 5 / 2)) --dist=loadfile '$run_tests; then
             echo "All tests passed."
            exit 0
          fi
 
          # Grab the results from the container
-          if ! ./tests/topotests/analyze.py -Ar test-results -C frr-ubuntu-cont; then
-            if [ ! -d test-results ]; then
+          if ! ./tests/topotests/analyze.py -Ar test-results-x86 -C frr-ubuntu-cont; then
+            if [ ! -d test-results-x86 ]; then
              echo "ERROR: Basic failure in docker run, no test results directory available." >&2
              exit 1;
            fi
-            if [ ! -f test-results/topotests.xml ]; then
+            if [ ! -f test-results-x86/topotests.xml ]; then
              # In this case we may be missing topotests.xml
              echo "ERROR: No topotests.xml available perhaps docker run aborted?" >&2
              exit 1;
@@ -114,11 +114,11 @@ jobs:
           fi
 
           # Save some information useful for debugging
-          cp /boot/config* test-results/
-          sysctl -a > test-results/sysctl.out 2> /dev/null
+          cp /boot/config* test-results-x86/
+          sysctl -a > test-results-x86/sysctl.out 2> /dev/null
 
           # Now get the failed tests (if any) from the archived results directory.
-          rerun_tests=$(./tests/topotests/analyze.py -r test-results | cut -f1 -d: | sort -u)
+          rerun_tests=$(./tests/topotests/analyze.py -r test-results-x86 | cut -f1 -d: | sort -u)
           if [ -z "$rerun_tests" ]; then
             echo "All tests passed during parallel run."
             exit 0
@@ -129,8 +129,8 @@ jobs:
           docker stop frr-ubuntu-cont
           docker rm frr-ubuntu-cont
 
-          mv test-results test-results-initial
-          if docker run --init -i --privileged --name frr-ubuntu-cont ${ADD_DOCKER_ENV} -v /lib/modules:/lib/modules frr-ubuntu22 \
+          mv test-results-x86 test-results-x86-initial
+          if docker run --init -i --privileged --name frr-ubuntu-cont ${ADD_DOCKER_ENV} -v /lib/modules:/lib/modules frr-x86-ubuntu22 \
             bash -c 'cd ~/frr/tests/topotests ; sudo -E pytest '$rerun_tests; then
             echo "All rerun tests passed."
             exit 0
@@ -140,8 +140,8 @@ jobs:
       - name: Gather results
         if: ${{ always() }}
         run: |
-          if [ ! -d test-results ]; then
-            if ! ./tests/topotests/analyze.py -Ar test-results -C frr-ubuntu-cont; then
+          if [ ! -d test-results-x86 ]; then
+            if ! ./tests/topotests/analyze.py -Ar test-results-x86 -C frr-ubuntu-cont; then
               echo "ERROR: gathering results produced an error, perhaps due earlier run cancellation." >&2
             fi
           fi
@@ -149,15 +149,163 @@ jobs:
         if: ${{ always() }}
         uses: actions/upload-artifact@v4
         with:
-          name: test-results
+          name: test-results-x86
           path: |
-            test-results
-            test-results-initial
+            test-results-x86
+            test-results-x86-initial
           overwrite: true
       - name: Cleanup
         if: ${{ always() }}
         run: |
-          rm -rf test-results* /tmp/frr-ubuntu22.tar
+          rm -rf test-results-x86* /tmp/frr-x86-ubuntu22.tar
           docker stop frr-ubuntu-cont || true
           docker rm frr-ubuntu-cont || true
 
+  build-arm-docker:
+    name: Build the ARM ubuntu 22.04 docker image
+    runs-on: ubuntu-22.04-arm
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 1
+      - name: Build docker image
+        run: |
+          docker build -t frr-arm-ubuntu22 -f docker/ubuntu-ci/Dockerfile .
+ docker save --output /tmp/frr-arm-ubuntu22.tar frr-arm-ubuntu22 + - name: Upload docker image artifact + uses: actions/upload-artifact@v4 + with: + name: ubuntu-arm-image + path: /tmp/frr-arm-ubuntu22.tar + - name: Clear any previous results + # So if all jobs are re-run then all tests will be re-run + run: | + rm -rf test-results-arm* + mkdir -p test-results-arm + touch test-results-arm/cleared-results.txt + - name: Save cleared previous results + uses: actions/upload-artifact@v4 + with: + name: test-results-arm + path: test-results-arm + overwrite: true + - name: Cleanup + if: ${{ always() }} + run: rm -rf test-results-arm* /tmp/frr-arm-ubuntu22.tar + + test-arm-docker: + name: Test ubuntu ARM docker image + needs: build-arm-docker + runs-on: ubuntu-22.04-arm + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 1 + - name: Fetch docker image artifact + uses: actions/download-artifact@v4 + with: + name: ubuntu-arm-image + path: /tmp + - name: Fetch previous results + if: ${{ github.run_attempt > 1 }} + uses: actions/download-artifact@v4 + with: + name: test-results-arm + path: test-results-arm + - name: Run topotests + run: | + uname -a + MODPKGVER=$(uname -r) + sudo apt-get update -y + # Github is running old kernels but installing newer packages :( + sudo apt-get install -y linux-modules-extra-azure linux-modules-${MODPKGVER} linux-modules-extra-${MODPKGVER} python3-xmltodict + sudo modprobe vrf || true + sudo modprobe mpls-iptunnel + sudo modprobe mpls-router + docker load --input /tmp/frr-arm-ubuntu22.tar + + if ! grep CONFIG_IP_MROUTE_MULTIPLE_TABLES=y /boot/config*; then + ADD_DOCKER_ENV+="-e MROUTE_VRF_MISSING=1" + fi + echo "ADD_DOCKER_ENV: ${ADD_DOCKER_ENV}" + + if [ -f test-results-arm/topotests.xml ]; then + ./tests/topotests/analyze.py -r test-results-arm + ls -l test-results-arm/topotests.xml + run_tests=$(./tests/topotests/analyze.py -r test-results-arm | cut -f1 -d: | sort -u) + else + echo "No test results dir" + run_tests="" + fi + rm -rf test-results-arm* /tmp/topotests + + echo RUN_TESTS: $run_tests + if docker run --init -i --privileged --name frr-ubuntu-cont ${ADD_DOCKER_ENV} -v /lib/modules:/lib/modules frr-arm-ubuntu22 \ + bash -c 'cd ~/frr/tests/topotests ; sudo -E pytest -n$(($(nproc) * 5 / 2)) --dist=loadfile '$run_tests; then + echo "All tests passed." + exit 0 + fi + + # Grab the results from the container + if ! ./tests/topotests/analyze.py -Ar test-results-arm -C frr-ubuntu-cont; then + if [ ! -d test-results-arm ]; then + echo "ERROR: Basic failure in docker run, no test results directory available." >&2 + exit 1; + fi + if [ ! -f test-results-arm/topotests.xml ]; then + # In this case we may be missing topotests.xml + echo "ERROR: No topotests.xml available perhaps docker run aborted?" >&2 + exit 1; + fi + echo "WARNING: analyyze.py returned error but grabbed results anyway." >&2 + fi + + # Save some information useful for debugging + cp /boot/config* test-results-arm/ + sysctl -a > test-results-arm/sysctl.out 2> /dev/null + + # Now get the failed tests (if any) from the archived results directory. + rerun_tests=$(./tests/topotests/analyze.py -r test-results-arm | cut -f1 -d: | sort -u) + if [ -z "$rerun_tests" ]; then + echo "All tests passed during parallel run." + exit 0 + fi + + echo "ERROR: Some tests failed during parallel run, rerunning serially." 
>&2 + echo RERUN_TESTS: $rerun_tests >&2 + docker stop frr-ubuntu-cont + docker rm frr-ubuntu-cont + + mv test-results-arm test-results-arm-initial + if docker run --init -i --privileged --name frr-ubuntu-cont ${ADD_DOCKER_ENV} -v /lib/modules:/lib/modules frr-arm-ubuntu22 \ + bash -c 'cd ~/frr/tests/topotests ; sudo -E pytest '$rerun_tests; then + echo "All rerun tests passed." + exit 0 + fi + echo "Some rerun tests still failed." + exit 1 + - name: Gather results + if: ${{ always() }} + run: | + if [ ! -d test-results-arm ]; then + if ! ./tests/topotests/analyze.py -Ar test-results-arm -C frr-ubuntu-cont; then + echo "ERROR: gathering results produced an error, perhaps due earlier run cancellation." >&2 + fi + fi + - name: Upload test results + if: ${{ always() }} + uses: actions/upload-artifact@v4 + with: + name: test-results-arm + path: | + test-results-arm + test-results-arm-initial + overwrite: true + - name: Cleanup + if: ${{ always() }} + run: | + rm -rf test-results-arm* /tmp/frr-arm-ubuntu22.tar + docker stop frr-ubuntu-cont || true + docker rm frr-ubuntu-cont || true diff --git a/bgpd/bgp_addpath.c b/bgpd/bgp_addpath.c index aada6e555f..030db4b28e 100644 --- a/bgpd/bgp_addpath.c +++ b/bgpd/bgp_addpath.c @@ -10,8 +10,6 @@ #include "bgp_addpath.h" #include "bgp_route.h" -#include "bgp_open.h" -#include "bgp_packet.h" static const struct bgp_addpath_strategy_names strat_names[BGP_ADDPATH_MAX] = { { @@ -361,30 +359,6 @@ void bgp_addpath_type_changed(struct bgp *bgp) } } -int bgp_addpath_capability_action(enum bgp_addpath_strat addpath_type, uint16_t paths) -{ - int action = CAPABILITY_ACTION_UNSET; - - switch (addpath_type) { - case BGP_ADDPATH_ALL: - case BGP_ADDPATH_BEST_PER_AS: - action = CAPABILITY_ACTION_SET; - break; - case BGP_ADDPATH_BEST_SELECTED: - if (paths) - action = CAPABILITY_ACTION_SET; - else - action = CAPABILITY_ACTION_UNSET; - break; - case BGP_ADDPATH_NONE: - case BGP_ADDPATH_MAX: - action = CAPABILITY_ACTION_UNSET; - break; - } - - return action; -} - /* * Change the addpath type assigned to a peer, or peer group. 
In addition to * adjusting the counts, peer sessions will be reset as needed to make the @@ -398,7 +372,6 @@ void bgp_addpath_set_peer_type(struct peer *peer, afi_t afi, safi_t safi, struct listnode *node, *nnode; struct peer *tmp_peer; struct peer_group *group; - int action = bgp_addpath_capability_action(addpath_type, paths); if (safi == SAFI_LABELED_UNICAST) safi = SAFI_UNICAST; @@ -456,12 +429,9 @@ void bgp_addpath_set_peer_type(struct peer *peer, afi_t afi, safi_t safi, } } } else { - if (!CHECK_FLAG(peer->cap, PEER_CAP_DYNAMIC_RCV) && - !CHECK_FLAG(peer->cap, PEER_CAP_DYNAMIC_ADV)) - peer_change_action(peer, afi, safi, peer_change_reset); + peer_change_action(peer, afi, safi, peer_change_reset); } - bgp_capability_send(peer, afi, safi, CAPABILITY_CODE_ADDPATH, action); } /* diff --git a/bgpd/bgp_addpath.h b/bgpd/bgp_addpath.h index f1ff98ea7a..c136671ea4 100644 --- a/bgpd/bgp_addpath.h +++ b/bgpd/bgp_addpath.h @@ -15,11 +15,7 @@ #include "bgpd/bgp_table.h" #include "lib/json.h" -struct bgp_addpath_capability { - uint16_t afi; - uint8_t safi; - uint8_t flags; -}; +#define BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE 1 struct bgp_paths_limit_capability { uint16_t afi; @@ -27,8 +23,6 @@ struct bgp_paths_limit_capability { uint16_t paths_limit; } __attribute__((packed)); -#define BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE 1 - void bgp_addpath_init_bgp_data(struct bgp_addpath_bgp_data *d); bool bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data *d, afi_t afi, @@ -68,5 +62,4 @@ void bgp_addpath_update_ids(struct bgp *bgp, struct bgp_dest *dest, afi_t afi, safi_t safi); void bgp_addpath_type_changed(struct bgp *bgp); -extern int bgp_addpath_capability_action(enum bgp_addpath_strat addpath_type, uint16_t paths); #endif diff --git a/bgpd/bgp_attr.h b/bgpd/bgp_attr.h index c5532f4005..341c062510 100644 --- a/bgpd/bgp_attr.h +++ b/bgpd/bgp_attr.h @@ -323,7 +323,6 @@ struct attr { /* rmap_change_flags definition */ #define BATTR_RMAP_IPV4_NHOP_CHANGED (1 << 0) #define BATTR_RMAP_NEXTHOP_PEER_ADDRESS (1 << 1) -#define BATTR_REFLECTED (1 << 2) #define BATTR_RMAP_NEXTHOP_UNCHANGED (1 << 3) #define BATTR_RMAP_IPV6_GLOBAL_NHOP_CHANGED (1 << 4) #define BATTR_RMAP_IPV6_LL_NHOP_CHANGED (1 << 5) @@ -604,10 +603,11 @@ static inline uint64_t bgp_aigp_metric_total(struct bgp_path_info *bpi) { uint64_t aigp = bgp_attr_get_aigp_metric(bpi->attr); - if (bpi->nexthop) - return aigp + bpi->nexthop->metric; - else + /* Don't increment if it's locally sourced */ + if (bpi->peer == bpi->peer->bgp->peer_self) return aigp; + + return bpi->extra ? (aigp + bpi->extra->igpmetric) : aigp; } static inline void bgp_attr_set_med(struct attr *attr, uint32_t med) diff --git a/bgpd/bgp_bfd.c b/bgpd/bgp_bfd.c index 50b00d21b1..5437b67f34 100644 --- a/bgpd/bgp_bfd.c +++ b/bgpd/bgp_bfd.c @@ -114,6 +114,10 @@ void bgp_peer_config_apply(struct peer *p, struct peer_group *pg) */ gconfig = pg->conf; + if (CHECK_FLAG(gconfig->flags, PEER_FLAG_UPDATE_SOURCE) || + CHECK_FLAG(p->flags_override, PEER_FLAG_UPDATE_SOURCE)) + bgp_peer_bfd_update_source(p); + /* * If using default control plane independent configuration, * then prefer group's (e.g. it means it wasn't manually configured). @@ -188,7 +192,7 @@ void bgp_peer_bfd_update_source(struct peer *p) } } } else { - source = p->su_local; + source = p->connection->su_local; } /* Update peer's source/destination addresses. */ @@ -316,13 +320,14 @@ void bgp_peer_configure_bfd(struct peer *p, bool manual) /* Configure session with basic BGP peer data. 
*/ if (p->connection->su.sa.sa_family == AF_INET) bfd_sess_set_ipv4_addrs(p->bfd_config->session, - p->su_local ? &p->su_local->sin.sin_addr - : NULL, + p->connection->su_local + ? &p->connection->su_local->sin.sin_addr + : NULL, &p->connection->su.sin.sin_addr); else bfd_sess_set_ipv6_addrs(p->bfd_config->session, - p->su_local - ? &p->su_local->sin6.sin6_addr + p->connection->su_local + ? &p->connection->su_local->sin6.sin6_addr : NULL, &p->connection->su.sin6.sin6_addr); diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c index acc49cac94..e458e5e5ac 100644 --- a/bgpd/bgp_bmp.c +++ b/bgpd/bgp_bmp.c @@ -28,6 +28,7 @@ #include "bgpd/bgp_table.h" #include "bgpd/bgpd.h" #include "bgpd/bgp_route.h" +#include "bgpd/bgp_nht.h" #include "bgpd/bgp_attr.h" #include "bgpd/bgp_dump.h" #include "bgpd/bgp_errors.h" @@ -49,6 +50,12 @@ static struct bmp_bgp_peer *bmp_bgp_peer_find(uint64_t peerid); static struct bmp_bgp_peer *bmp_bgp_peer_get(struct peer *peer); static void bmp_active_disconnected(struct bmp_active *ba); static void bmp_active_put(struct bmp_active *ba); +static int bmp_route_update_bgpbmp(struct bmp_targets *bt, afi_t afi, safi_t safi, + struct bgp_dest *bn, struct bgp_path_info *old_route, + struct bgp_path_info *new_route); +static void bmp_send_all_bgp(struct peer *peer, bool down); +static struct bmp_imported_bgp *bmp_imported_bgp_find(struct bmp_targets *bt, char *name); +static void bmp_stats_per_instance(struct bgp *bgp, struct bmp_targets *bt); DEFINE_MGROUP(BMP, "BMP (BGP Monitoring Protocol)"); @@ -63,6 +70,7 @@ DEFINE_MTYPE_STATIC(BMP, BMP, "BMP instance state"); DEFINE_MTYPE_STATIC(BMP, BMP_MIRRORQ, "BMP route mirroring buffer"); DEFINE_MTYPE_STATIC(BMP, BMP_PEER, "BMP per BGP peer data"); DEFINE_MTYPE_STATIC(BMP, BMP_OPEN, "BMP stored BGP OPEN message"); +DEFINE_MTYPE_STATIC(BMP, BMP_IMPORTED_BGP, "BMP imported BGP instance"); DEFINE_QOBJ_TYPE(bmp_targets); @@ -140,6 +148,17 @@ static int bmp_targets_cmp(const struct bmp_targets *a, DECLARE_SORTLIST_UNIQ(bmp_targets, struct bmp_targets, bti, bmp_targets_cmp); +static int bmp_imported_bgps_cmp(const struct bmp_imported_bgp *a, const struct bmp_imported_bgp *b) +{ + if (a->name == NULL && b->name == NULL) + return 0; + if (a->name == NULL || b->name == NULL) + return 1; + return strcmp(a->name, b->name); +} + +DECLARE_SORTLIST_UNIQ(bmp_imported_bgps, struct bmp_imported_bgp, bib, bmp_imported_bgps_cmp); + DECLARE_LIST(bmp_session, struct bmp, bsi); DECLARE_DLIST(bmp_qlist, struct bmp_queue_entry, bli); @@ -227,6 +246,7 @@ static struct bmp *bmp_new(struct bmp_targets *bt, int bmp_sock) new->targets = bt; new->socket = bmp_sock; new->syncafi = AFI_MAX; + new->sync_bgp = NULL; FOREACH_AFI_SAFI (afi, safi) { new->afistate[afi][safi] = bt->afimon[afi][safi] @@ -398,14 +418,17 @@ static void bmp_put_info_tlv(struct stream *s, uint16_t type, /* put the vrf table name of the bgp instance bmp is bound to in a tlv on the * stream */ -static void bmp_put_vrftablename_info_tlv(struct stream *s, struct bgp *bgp) +static void bmp_put_vrftablename_info_tlv(struct stream *s, struct peer *peer) { const char *vrftablename = "global"; + struct vrf *vrf; #define BMP_INFO_TYPE_VRFTABLENAME 3 - if (bgp->inst_type != BGP_INSTANCE_TYPE_DEFAULT) - vrftablename = bgp->name; + if (peer->bgp->inst_type != BGP_INSTANCE_TYPE_DEFAULT) { + vrf = vrf_lookup_by_id(peer->bgp->vrf_id); + vrftablename = vrf ? 
vrf->name : NULL; + } if (vrftablename != NULL) bmp_put_info_tlv(s, BMP_INFO_TYPE_VRFTABLENAME, vrftablename); @@ -495,29 +518,29 @@ static struct stream *bmp_peerstate(struct peer *peer, bool down) /* Local Address (16 bytes) */ if (is_locrib) stream_put(s, 0, 16); - else if (peer->su_local->sa.sa_family == AF_INET6) - stream_put(s, &peer->su_local->sin6.sin6_addr, 16); - else if (peer->su_local->sa.sa_family == AF_INET) { + else if (peer->connection->su_local->sa.sa_family == AF_INET6) + stream_put(s, &peer->connection->su_local->sin6.sin6_addr, 16); + else if (peer->connection->su_local->sa.sa_family == AF_INET) { stream_putl(s, 0); stream_putl(s, 0); stream_putl(s, 0); - stream_put_in_addr(s, &peer->su_local->sin.sin_addr); + stream_put_in_addr(s, &peer->connection->su_local->sin.sin_addr); } /* Local Port, Remote Port */ - if (!peer->su_local || is_locrib) + if (!peer->connection->su_local || is_locrib) stream_putw(s, 0); - else if (peer->su_local->sa.sa_family == AF_INET6) - stream_putw(s, htons(peer->su_local->sin6.sin6_port)); - else if (peer->su_local->sa.sa_family == AF_INET) - stream_putw(s, htons(peer->su_local->sin.sin_port)); + else if (peer->connection->su_local->sa.sa_family == AF_INET6) + stream_putw(s, htons(peer->connection->su_local->sin6.sin6_port)); + else if (peer->connection->su_local->sa.sa_family == AF_INET) + stream_putw(s, htons(peer->connection->su_local->sin.sin_port)); - if (!peer->su_remote || is_locrib) + if (!peer->connection->su_remote || is_locrib) stream_putw(s, 0); - else if (peer->su_remote->sa.sa_family == AF_INET6) - stream_putw(s, htons(peer->su_remote->sin6.sin6_port)); - else if (peer->su_remote->sa.sa_family == AF_INET) - stream_putw(s, htons(peer->su_remote->sin.sin_port)); + else if (peer->connection->su_remote->sa.sa_family == AF_INET6) + stream_putw(s, htons(peer->connection->su_remote->sin6.sin6_port)); + else if (peer->connection->su_remote->sa.sa_family == AF_INET) + stream_putw(s, htons(peer->connection->su_remote->sin.sin_port)); /* TODO craft message with fields & capabilities for loc-rib */ static const uint8_t dummy_open[] = { @@ -589,63 +612,123 @@ static struct stream *bmp_peerstate(struct peer *peer, bool down) } if (is_locrib) - bmp_put_vrftablename_info_tlv(s, peer->bgp); + bmp_put_vrftablename_info_tlv(s, peer); len = stream_get_endp(s); stream_putl_at(s, BMP_LENGTH_POS, len); /* message length is set. */ return s; } - -static int bmp_send_peerup(struct bmp *bmp) +static int bmp_send_peerup_per_instance(struct bmp *bmp, struct bgp *bgp) { struct peer *peer; struct listnode *node; struct stream *s; /* Walk down all peers */ - for (ALL_LIST_ELEMENTS_RO(bmp->targets->bgp->peer, node, peer)) { + for (ALL_LIST_ELEMENTS_RO(bgp->peer, node, peer)) { s = bmp_peerstate(peer, false); if (s) { pullwr_write_stream(bmp->pullwr, s); stream_free(s); } } + return 0; +} + +static int bmp_send_peerup(struct bmp *bmp) +{ + struct bmp_imported_bgp *bib; + struct bgp *bgp; + + bmp_send_peerup_per_instance(bmp, bmp->targets->bgp); + frr_each (bmp_imported_bgps, &bmp->targets->imported_bgps, bib) { + bgp = bgp_lookup_by_name(bib->name); + if (bgp) + bmp_send_peerup_per_instance(bmp, bgp); + } return 0; } -static int bmp_send_peerup_vrf(struct bmp *bmp) +static void bmp_send_peerup_vrf_per_instance(struct bmp *bmp, enum bmp_vrf_state *vrf_state, + struct bgp *bgp) { - struct bmp_bgp *bmpbgp = bmp->targets->bmpbgp; struct stream *s; /* send unconditionally because state may has been set before the * session was up. 
and in this case the peer up has not been sent. */ - bmp_bgp_update_vrf_status(bmpbgp, vrf_state_unknown); + bmp_bgp_update_vrf_status(vrf_state, bgp, vrf_state_unknown); - s = bmp_peerstate(bmpbgp->bgp->peer_self, bmpbgp->vrf_state == vrf_state_down); + s = bmp_peerstate(bgp->peer_self, *vrf_state == vrf_state_down); if (s) { pullwr_write_stream(bmp->pullwr, s); stream_free(s); } +} + +static int bmp_send_peerup_vrf(struct bmp *bmp) +{ + struct bgp *bgp; + struct bmp_imported_bgp *bib; + struct bmp_bgp *bmpbgp = bmp->targets->bmpbgp; + struct bmp_targets *bt; + + bmp_send_peerup_vrf_per_instance(bmp, &bmpbgp->vrf_state, bmpbgp->bgp); + + frr_each (bmp_targets, &bmpbgp->targets, bt) { + frr_each (bmp_imported_bgps, &bt->imported_bgps, bib) { + bgp = bgp_lookup_by_name(bib->name); + if (!bgp) + continue; + bmp_send_peerup_vrf_per_instance(bmp, &bib->vrf_state, bgp); + } + } return 0; } +static void bmp_send_bt(struct bmp_targets *bt, struct stream *s) +{ + struct bmp *bmp; + + frr_each (bmp_session, &bt->sessions, bmp) + pullwr_write_stream(bmp->pullwr, s); +} + +static void bmp_send_bt_safe(struct bmp_targets *bt, struct stream *s) +{ + if (!s) + return; + + bmp_send_bt(bt, s); + + stream_free(s); +} + +static void bmp_send_peerdown_vrf_per_instance(struct bmp_targets *bt, struct bgp *bgp) +{ + struct stream *s; + + s = bmp_peerstate(bgp->peer_self, true); + if (!s) + return; + bmp_send_bt(bt, s); + stream_free(s); +} + /* send a stream to all bmp sessions configured in a bgp instance */ /* XXX: kludge - filling the pullwr's buffer */ static void bmp_send_all(struct bmp_bgp *bmpbgp, struct stream *s) { struct bmp_targets *bt; - struct bmp *bmp; if (!s) return; frr_each(bmp_targets, &bmpbgp->targets, bt) - frr_each(bmp_session, &bt->sessions, bmp) - pullwr_write_stream(bmp->pullwr, s); + bmp_send_bt(bt, s); + stream_free(s); } @@ -724,11 +807,13 @@ static void bmp_mirror_cull(struct bmp_bgp *bmpbgp) static int bmp_mirror_packet(struct peer *peer, uint8_t type, bgp_size_t size, struct stream *packet) { - struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp); + struct bmp_bgp *bmpbgp; struct timeval tv; - struct bmp_mirrorq *qitem; + struct bmp_mirrorq *qitem = NULL; struct bmp_targets *bt; struct bmp *bmp; + struct bgp *bgp_vrf; + struct listnode *node; frrtrace(3, frr_bgp, bmp_mirror_packet, peer, type, packet); @@ -744,8 +829,6 @@ static int bmp_mirror_packet(struct peer *peer, uint8_t type, bgp_size_t size, memcpy(bbpeer->open_rx, packet->data, size); } - if (!bmpbgp) - return 0; qitem = XCALLOC(MTYPE_BMP_MIRRORQ, sizeof(*qitem) + size); qitem->peerid = peer->qobj_node.nid; @@ -753,27 +836,41 @@ static int bmp_mirror_packet(struct peer *peer, uint8_t type, bgp_size_t size, qitem->len = size; memcpy(qitem->data, packet->data, size); - frr_each(bmp_targets, &bmpbgp->targets, bt) { - if (!bt->mirror) + for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp_vrf)) { + bmpbgp = bmp_bgp_find(bgp_vrf); + if (!bmpbgp) continue; - frr_each(bmp_session, &bt->sessions, bmp) { - qitem->refcount++; - if (!bmp->mirrorpos) - bmp->mirrorpos = qitem; - pullwr_bump(bmp->pullwr); - } - } - if (qitem->refcount == 0) - XFREE(MTYPE_BMP_MIRRORQ, qitem); - else { - bmpbgp->mirror_qsize += sizeof(*qitem) + size; - bmp_mirrorq_add_tail(&bmpbgp->mirrorq, qitem); + frr_each (bmp_targets, &bmpbgp->targets, bt) { + if (!bt->mirror) + continue; + + if (bgp_vrf != peer->bgp && !bmp_imported_bgp_find(bt, peer->bgp->name)) + continue; + + frr_each (bmp_session, &bt->sessions, bmp) { + if (!qitem) { + qitem = XCALLOC(MTYPE_BMP_MIRRORQ, 
sizeof(*qitem) + size); + qitem->peerid = peer->qobj_node.nid; + qitem->tv = tv; + qitem->len = size; + memcpy(qitem->data, packet->data, size); + } + + qitem->refcount++; + if (!bmp->mirrorpos) + bmp->mirrorpos = qitem; + pullwr_bump(bmp->pullwr); + } + bmpbgp->mirror_qsize += sizeof(*qitem) + size; + bmp_mirrorq_add_tail(&bmpbgp->mirrorq, qitem); - bmp_mirror_cull(bmpbgp); + bmp_mirror_cull(bmpbgp); - bmpbgp->mirror_qsizemax = MAX(bmpbgp->mirror_qsizemax, - bmpbgp->mirror_qsize); + bmpbgp->mirror_qsizemax = MAX(bmpbgp->mirror_qsizemax, bmpbgp->mirror_qsize); + } } + if (qitem && qitem->refcount == 0) + XFREE(MTYPE_BMP_MIRRORQ, qitem); return 0; } @@ -846,8 +943,7 @@ static bool bmp_wrmirror(struct bmp *bmp, struct pullwr *pullwr) s = stream_new(BGP_MAX_PACKET_SIZE); bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_ROUTE_MIRRORING); - bmp_per_peer_hdr(s, bmp->targets->bgp, peer, 0, peer_type_flag, peer_distinguisher, - &bmq->tv); + bmp_per_peer_hdr(s, peer->bgp, peer, 0, peer_type_flag, peer_distinguisher, &bmq->tv); /* BMP Mirror TLV. */ stream_putw(s, BMP_MIRROR_TLV_TYPE_BGP_MESSAGE); @@ -886,14 +982,10 @@ static int bmp_outgoing_packet(struct peer *peer, uint8_t type, bgp_size_t size, static int bmp_peer_status_changed(struct peer *peer) { - struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp); struct bmp_bgp_peer *bbpeer, *bbdopp; frrtrace(1, frr_bgp, bmp_peer_status_changed, peer); - if (!bmpbgp) - return 0; - if (peer->connection->status == Deleted) { bbpeer = bmp_bgp_peer_find(peer->qobj_node.nid); if (bbpeer) { @@ -928,20 +1020,16 @@ static int bmp_peer_status_changed(struct peer *peer) } } - bmp_send_all_safe(bmpbgp, bmp_peerstate(peer, false)); + bmp_send_all_bgp(peer, false); return 0; } static int bmp_peer_backward(struct peer *peer) { - struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp); struct bmp_bgp_peer *bbpeer; frrtrace(1, frr_bgp, bmp_peer_backward_transition, peer); - if (!bmpbgp) - return 0; - bbpeer = bmp_bgp_peer_find(peer->qobj_node.nid); if (bbpeer) { XFREE(MTYPE_BMP_OPEN, bbpeer->open_tx); @@ -950,12 +1038,12 @@ static int bmp_peer_backward(struct peer *peer) bbpeer->open_rx_len = 0; } - bmp_send_all_safe(bmpbgp, bmp_peerstate(peer, true)); + bmp_send_all_bgp(peer, true); return 0; } -static void bmp_eor(struct bmp *bmp, afi_t afi, safi_t safi, uint8_t flags, - uint8_t peer_type_flag) +static void bmp_eor(struct bmp *bmp, afi_t afi, safi_t safi, uint8_t flags, uint8_t peer_type_flag, + struct bgp *bgp) { struct peer *peer; struct listnode *node; @@ -963,7 +1051,7 @@ static void bmp_eor(struct bmp *bmp, afi_t afi, safi_t safi, uint8_t flags, iana_afi_t pkt_afi = IANA_AFI_IPV4; iana_safi_t pkt_safi = IANA_SAFI_UNICAST; - frrtrace(4, frr_bgp, bmp_eor, afi, safi, flags, peer_type_flag); + frrtrace(5, frr_bgp, bmp_eor, afi, safi, flags, peer_type_flag, bgp); s = stream_new(BGP_MAX_PACKET_SIZE); @@ -991,7 +1079,7 @@ static void bmp_eor(struct bmp *bmp, afi_t afi, safi_t safi, uint8_t flags, bgp_packet_set_size(s); - for (ALL_LIST_ELEMENTS_RO(bmp->targets->bgp->peer, node, peer)) { + for (ALL_LIST_ELEMENTS_RO(bgp->peer, node, peer)) { if (!peer->afc_nego[afi][safi]) continue; @@ -1008,8 +1096,7 @@ static void bmp_eor(struct bmp *bmp, afi_t afi, safi_t safi, uint8_t flags, bmp_common_hdr(s2, BMP_VERSION_3, BMP_TYPE_ROUTE_MONITORING); - bmp_per_peer_hdr(s2, bmp->targets->bgp, peer, flags, - peer_type_flag, peer_distinguisher, NULL); + bmp_per_peer_hdr(s2, bgp, peer, flags, peer_type_flag, peer_distinguisher, NULL); stream_putl_at(s2, BMP_LENGTH_POS, stream_get_endp(s) + 
stream_get_endp(s2)); @@ -1140,8 +1227,7 @@ static void bmp_monitor(struct bmp *bmp, struct peer *peer, uint8_t flags, hdr = stream_new(BGP_MAX_PACKET_SIZE); bmp_common_hdr(hdr, BMP_VERSION_3, BMP_TYPE_ROUTE_MONITORING); - bmp_per_peer_hdr(hdr, bmp->targets->bgp, peer, flags, peer_type_flag, - peer_distinguisher, + bmp_per_peer_hdr(hdr, peer->bgp, peer, flags, peer_type_flag, peer_distinguisher, uptime == (time_t)(-1L) ? NULL : &uptime_real); stream_putl_at(hdr, BMP_LENGTH_POS, @@ -1154,6 +1240,93 @@ static void bmp_monitor(struct bmp *bmp, struct peer *peer, uint8_t flags, stream_free(msg); } +static struct bgp *bmp_get_next_bgp(struct bmp_targets *bt, struct bgp *bgp, afi_t afi, safi_t safi) +{ + struct bmp_imported_bgp *bib; + struct bgp *bgp_inst; + bool get_first = false; + + if (bgp == NULL && bt->bgp_request_sync[afi][safi]) + return bt->bgp; + if (bgp == NULL) + get_first = true; + frr_each (bmp_imported_bgps, &bt->imported_bgps, bib) { + bgp_inst = bgp_lookup_by_name(bib->name); + if (get_first && bgp_inst && bib->bgp_request_sync[afi][safi]) + return bgp_inst; + if (bgp_inst == bgp) + get_first = true; + } + return NULL; +} + +static void bmp_update_syncro(struct bmp *bmp, afi_t afi, safi_t safi, struct bgp *bgp) +{ + struct bmp_imported_bgp *bib; + + if (bmp->syncafi == afi && bmp->syncsafi == safi) { + bmp->syncafi = AFI_MAX; + bmp->syncsafi = SAFI_MAX; + bmp->sync_bgp = NULL; + } + + if (!bmp->targets->afimon[afi][safi]) { + bmp->afistate[afi][safi] = BMP_AFI_INACTIVE; + return; + } + + bmp->afistate[afi][safi] = BMP_AFI_NEEDSYNC; + + if (bgp == NULL || bmp->targets->bgp == bgp) + bmp->targets->bgp_request_sync[afi][safi] = true; + + frr_each (bmp_imported_bgps, &bmp->targets->imported_bgps, bib) { + if (bgp != NULL && bgp_lookup_by_name(bib->name) != bgp) + continue; + bib->bgp_request_sync[afi][safi] = true; + } +} + +static void bmp_update_syncro_set(struct bmp *bmp, afi_t afi, safi_t safi, struct bgp *bgp, + enum bmp_afi_state state) +{ + struct bmp_imported_bgp *bib; + + bmp->afistate[afi][safi] = state; + bmp->syncafi = AFI_MAX; + bmp->syncsafi = SAFI_MAX; + if (bgp == NULL || bmp->targets->bgp == bmp->sync_bgp) + bmp->targets->bgp_request_sync[afi][safi] = false; + + frr_each (bmp_imported_bgps, &bmp->targets->imported_bgps, bib) { + if (bgp == NULL || bgp_lookup_by_name(bib->name) != bmp->sync_bgp) + continue; + bib->bgp_request_sync[afi][safi] = false; + } +} + +static void bmp_eor_afi_safi(struct bmp *bmp, afi_t afi, safi_t safi, uint8_t peer_type_flag) +{ + struct bgp *sync_bgp; + + zlog_info("bmp[%s] %s %s table completed (EoR) (BGP %s)", bmp->remote, afi2str(afi), + safi2str(safi), bmp->sync_bgp->name_pretty); + + bmp_eor(bmp, afi, safi, BMP_PEER_FLAG_L, peer_type_flag, bmp->sync_bgp); + bmp_eor(bmp, afi, safi, 0, peer_type_flag, bmp->sync_bgp); + bmp_eor(bmp, afi, safi, 0, BMP_PEER_TYPE_LOC_RIB_INSTANCE, bmp->sync_bgp); + + sync_bgp = bmp_get_next_bgp(bmp->targets, bmp->sync_bgp, afi, safi); + if (sync_bgp) { + memset(&bmp->syncpos, 0, sizeof(bmp->syncpos)); + bmp->syncpos.family = afi2family(afi); + bmp->syncrdpos = NULL; + bmp->syncpeerid = 0; + } else + bmp_update_syncro_set(bmp, afi, safi, bmp->sync_bgp, BMP_AFI_LIVE); + bmp->sync_bgp = sync_bgp; +} + static bool bmp_wrsync(struct bmp *bmp, struct pullwr *pullwr) { uint8_t bpi_num_labels, adjin_num_labels; @@ -1174,10 +1347,13 @@ static bool bmp_wrsync(struct bmp *bmp, struct pullwr *pullwr) memset(&bmp->syncpos, 0, sizeof(bmp->syncpos)); bmp->syncpos.family = afi2family(afi); bmp->syncrdpos = NULL; - 
zlog_info("bmp[%s] %s %s sending table", - bmp->remote, - afi2str(bmp->syncafi), - safi2str(bmp->syncsafi)); + bmp->sync_bgp = bmp_get_next_bgp(bmp->targets, NULL, afi, safi); + if (bmp->sync_bgp == NULL) + /* all BGP instances already synced*/ + return true; + zlog_info("bmp[%s] %s %s sending table (BGP %s)", bmp->remote, + afi2str(bmp->syncafi), safi2str(bmp->syncsafi), + bmp->sync_bgp->name_pretty); /* break does not work here, 2 loops... */ goto afibreak; } @@ -1191,18 +1367,22 @@ afibreak: if (!bmp->targets->afimon[afi][safi]) { /* shouldn't happen */ - bmp->afistate[afi][safi] = BMP_AFI_INACTIVE; - bmp->syncafi = AFI_MAX; - bmp->syncsafi = SAFI_MAX; + bmp_update_syncro_set(bmp, afi, safi, bmp->sync_bgp, BMP_AFI_INACTIVE); + bmp->sync_bgp = NULL; return true; } + if (bmp->sync_bgp == NULL) { + bmp->sync_bgp = bmp_get_next_bgp(bmp->targets, NULL, afi, safi); + if (bmp->sync_bgp == NULL) + return true; + } - struct bgp_table *table = bmp->targets->bgp->rib[afi][safi]; + struct bgp_table *table = bmp->sync_bgp->rib[afi][safi]; struct bgp_dest *bn = NULL; struct bgp_path_info *bpi = NULL, *bpiter; struct bgp_adj_in *adjin = NULL, *adjiter; - peer_type_flag = bmp_get_peer_type_vrf(bmp->targets->bgp->vrf_id); + peer_type_flag = bmp_get_peer_type_vrf(bmp->sync_bgp->vrf_id); if ((afi == AFI_L2VPN && safi == SAFI_EVPN) || (safi == SAFI_MPLS_VPN)) { @@ -1254,19 +1434,9 @@ afibreak: return true; } eor: - zlog_info("bmp[%s] %s %s table completed (EoR)", - bmp->remote, afi2str(afi), - safi2str(safi)); - - bmp_eor(bmp, afi, safi, BMP_PEER_FLAG_L, peer_type_flag); - bmp_eor(bmp, afi, safi, 0, peer_type_flag); - bmp_eor(bmp, afi, safi, 0, - BMP_PEER_TYPE_LOC_RIB_INSTANCE); - - bmp->afistate[afi][safi] = BMP_AFI_LIVE; - bmp->syncafi = AFI_MAX; - bmp->syncsafi = SAFI_MAX; - return true; + bmp_eor_afi_safi(bmp, afi, safi, + peer_type_flag); + return true; } bmp->syncpeerid = 0; prefix_copy(&bmp->syncpos, bgp_dest_get_prefix(bn)); @@ -1445,7 +1615,7 @@ static bool bmp_wrqueue_locrib(struct bmp *bmp, struct pullwr *pullwr) */ goto out; } - if (peer != bmp->targets->bgp->peer_self && !peer_established(peer->connection)) { + if (peer != peer->bgp->peer_self && !peer_established(peer->connection)) { /* peer is neither self, nor established */ goto out; @@ -1456,8 +1626,7 @@ static bool bmp_wrqueue_locrib(struct bmp *bmp, struct pullwr *pullwr) struct prefix_rd *prd = is_vpn ? &bqe->rd : NULL; - bn = bgp_safi_node_lookup(bmp->targets->bgp->rib[afi][safi], safi, - &bqe->p, prd); + bn = bgp_safi_node_lookup(peer->bgp->rib[afi][safi], safi, &bqe->p, prd); struct bgp_path_info *bpi; @@ -1533,8 +1702,7 @@ static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr) (bqe->safi == SAFI_MPLS_VPN); struct prefix_rd *prd = is_vpn ? 
&bqe->rd : NULL; - bn = bgp_safi_node_lookup(bmp->targets->bgp->rib[afi][safi], safi, - &bqe->p, prd); + bn = bgp_safi_node_lookup(peer->bgp->rib[afi][safi], safi, &bqe->p, prd); peer_type_flag = bmp_get_peer_type(peer); @@ -1584,11 +1752,16 @@ out: static void bmp_wrfill(struct bmp *bmp, struct pullwr *pullwr) { + afi_t afi; + safi_t safi; + switch(bmp->state) { case BMP_PeerUp: bmp_send_peerup_vrf(bmp); bmp_send_peerup(bmp); bmp->state = BMP_Run; + FOREACH_AFI_SAFI (afi, safi) + bmp_update_syncro(bmp, afi, safi, NULL); break; case BMP_Run: @@ -1666,9 +1839,12 @@ bmp_process_one(struct bmp_targets *bt, struct bmp_qhash_head *updhash, static int bmp_process(struct bgp *bgp, afi_t afi, safi_t safi, struct bgp_dest *bn, struct peer *peer, bool withdraw) { - struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp); + struct bmp_bgp *bmpbgp; struct bmp_targets *bt; struct bmp *bmp; + struct bgp *bgp_vrf; + struct listnode *node; + struct bmp_queue_entry *last_item; if (frrtrace_enabled(frr_bgp, bmp_process)) { char pfxprint[PREFIX2STR_BUFFER]; @@ -1678,36 +1854,59 @@ static int bmp_process(struct bgp *bgp, afi_t afi, safi_t safi, withdraw); } - if (!bmpbgp) - return 0; - - frr_each(bmp_targets, &bmpbgp->targets, bt) { - /* check if any monitoring is enabled (ignoring loc-rib since it - * uses another hook & queue - */ - if (!CHECK_FLAG(bt->afimon[afi][safi], ~BMP_MON_LOC_RIB)) + for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp_vrf)) { + bmpbgp = bmp_bgp_find(bgp_vrf); + if (!bmpbgp) continue; + frr_each (bmp_targets, &bmpbgp->targets, bt) { + /* check if any monitoring is enabled (ignoring loc-rib since it + * uses another hook & queue + */ + if (!CHECK_FLAG(bt->afimon[afi][safi], ~BMP_MON_LOC_RIB)) + continue; - struct bmp_queue_entry *last_item = - bmp_process_one(bt, &bt->updhash, &bt->updlist, bgp, - afi, safi, bn, peer); + if (bgp_vrf != peer->bgp && !bmp_imported_bgp_find(bt, peer->bgp->name)) + continue; - /* if bmp_process_one returns NULL - * we don't have anything to do next - */ - if (!last_item) - continue; + last_item = bmp_process_one(bt, &bt->updhash, &bt->updlist, bgp, afi, safi, + bn, peer); + /* if bmp_process_one returns NULL + * we don't have anything to do next + */ + if (!last_item) + continue; - frr_each(bmp_session, &bt->sessions, bmp) { - if (!bmp->queuepos) - bmp->queuepos = last_item; + frr_each (bmp_session, &bt->sessions, bmp) { + if (!bmp->queuepos) + bmp->queuepos = last_item; - pullwr_bump(bmp->pullwr); + pullwr_bump(bmp->pullwr); + } } } return 0; } +static int bmp_nht_path_valid(struct bgp *bgp, struct bgp_path_info *path, bool valid) +{ + struct bgp_dest *dest = path->net; + struct bgp_table *table; + + if (frrtrace_enabled(frr_bgp, bmp_nht_path_valid)) { + char pfxprint[PREFIX2STR_BUFFER]; + + prefix2str(&dest->rn->p, pfxprint, sizeof(pfxprint)); + frrtrace(4, frr_bgp, bmp_nht_path_valid, bgp, pfxprint, path, valid); + } + if (bgp->peer_self == path->peer) + /* self declared networks or redistributed networks are not relevant for bmp */ + return 0; + + table = bgp_dest_table(dest); + + return bmp_process(bgp, table->afi, table->safi, dest, path->peer, !valid); +} + static void bmp_stat_put_u32(struct stream *s, size_t *cnt, uint16_t type, uint32_t value) { @@ -1729,6 +1928,22 @@ static void bmp_stat_put_u64(struct stream *s, size_t *cnt, uint16_t type, static void bmp_stats(struct event *thread) { struct bmp_targets *bt = EVENT_ARG(thread); + struct bmp_imported_bgp *bib; + struct bgp *bgp; + + if (bt->stat_msec) + event_add_timer_msec(bm->master, bmp_stats, bt, 
bt->stat_msec, &bt->t_stats); + + bmp_stats_per_instance(bt->bgp, bt); + frr_each (bmp_imported_bgps, &bt->imported_bgps, bib) { + bgp = bgp_lookup_by_name(bib->name); + if (bgp) + bmp_stats_per_instance(bgp, bt); + } +} + +static void bmp_stats_per_instance(struct bgp *bgp, struct bmp_targets *bt) +{ struct stream *s; struct peer *peer; struct listnode *node; @@ -1736,14 +1951,10 @@ static void bmp_stats(struct event *thread) uint8_t peer_type_flag; uint64_t peer_distinguisher = 0; - if (bt->stat_msec) - event_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec, - &bt->t_stats); - gettimeofday(&tv, NULL); /* Walk down all peers */ - for (ALL_LIST_ELEMENTS_RO(bt->bgp->peer, node, peer)) { + for (ALL_LIST_ELEMENTS_RO(bgp->peer, node, peer)) { size_t count = 0, count_pos, len; if (!peer_established(peer->connection)) @@ -1959,6 +2170,7 @@ static struct bmp_bgp *bmp_bgp_get(struct bgp *bgp) bmpbgp->bgp = bgp; bmpbgp->vrf_state = vrf_state_unknown; bmpbgp->mirror_qsizelimit = ~0UL; + bmp_targets_init(&bmpbgp->targets); bmp_mirrorq_init(&bmpbgp->mirrorq); bmp_bgph_add(&bmp_bgph, bmpbgp); @@ -2013,11 +2225,16 @@ static void bmp_bgp_peer_vrf(struct bmp_bgp_peer *bbpeer, struct bgp *bgp) size_t open_len = stream_get_endp(s); bbpeer->open_rx_len = open_len; + if (bbpeer->open_rx) + XFREE(MTYPE_BMP_OPEN, bbpeer->open_rx); bbpeer->open_rx = XMALLOC(MTYPE_BMP_OPEN, open_len); memcpy(bbpeer->open_rx, s->data, open_len); bbpeer->open_tx_len = open_len; - bbpeer->open_tx = bbpeer->open_rx; + if (bbpeer->open_tx) + XFREE(MTYPE_BMP_OPEN, bbpeer->open_tx); + bbpeer->open_tx = XMALLOC(MTYPE_BMP_OPEN, open_len); + memcpy(bbpeer->open_tx, s->data, open_len); stream_free(s); } @@ -2028,36 +2245,37 @@ static void bmp_bgp_peer_vrf(struct bmp_bgp_peer *bbpeer, struct bgp *bgp) * * returns true if state has changed */ -bool bmp_bgp_update_vrf_status(struct bmp_bgp *bmpbgp, enum bmp_vrf_state force) +bool bmp_bgp_update_vrf_status(enum bmp_vrf_state *vrf_state, struct bgp *bgp, + enum bmp_vrf_state force) { enum bmp_vrf_state old_state; struct bmp_bgp_peer *bbpeer; struct peer *peer; struct vrf *vrf; - struct bgp *bgp; bool changed; - if (!bmpbgp || !bmpbgp->bgp) + if (!vrf_state || !bgp) return false; - bgp = bmpbgp->bgp; - old_state = bmpbgp->vrf_state; + old_state = *vrf_state; vrf = bgp_vrf_lookup_by_instance_type(bgp); - bmpbgp->vrf_state = force != vrf_state_unknown ? force - : vrf_is_enabled(vrf) ? vrf_state_up - : vrf_state_down; + *vrf_state = force != vrf_state_unknown ? force + : vrf_is_enabled(vrf) ? 
vrf_state_up + : vrf_state_down; - changed = old_state != bmpbgp->vrf_state; + changed = old_state != *vrf_state; if (changed) { - peer = bmpbgp->bgp->peer_self; - if (bmpbgp->vrf_state == vrf_state_up) { + peer = bgp->peer_self; + if (*vrf_state == vrf_state_up) { bbpeer = bmp_bgp_peer_get(peer); bmp_bgp_peer_vrf(bbpeer, bgp); } else { bbpeer = bmp_bgp_peer_find(peer->qobj_node.nid); if (bbpeer) { + XFREE(MTYPE_BMP_OPEN, bbpeer->open_tx); XFREE(MTYPE_BMP_OPEN, bbpeer->open_rx); + XFREE(MTYPE_BMP_OPEN, bbpeer->open_tx); bmp_peerh_del(&bmp_peerh, bbpeer); XFREE(MTYPE_BMP_PEER, bbpeer); } @@ -2102,6 +2320,8 @@ static struct bmp_targets *bmp_targets_find1(struct bgp *bgp, const char *name) static struct bmp_targets *bmp_targets_get(struct bgp *bgp, const char *name) { struct bmp_targets *bt; + afi_t afi; + safi_t safi; bt = bmp_targets_find1(bgp, name); if (bt) @@ -2112,6 +2332,8 @@ static struct bmp_targets *bmp_targets_get(struct bgp *bgp, const char *name) bt->bgp = bgp; bt->bmpbgp = bmp_bgp_get(bgp); bt->stats_send_experimental = true; + FOREACH_AFI_SAFI (afi, safi) + bt->bgp_request_sync[afi][safi] = false; bmp_session_init(&bt->sessions); bmp_qhash_init(&bt->updhash); bmp_qlist_init(&bt->updlist); @@ -2119,16 +2341,25 @@ static struct bmp_targets *bmp_targets_get(struct bgp *bgp, const char *name) bmp_qlist_init(&bt->locupdlist); bmp_actives_init(&bt->actives); bmp_listeners_init(&bt->listeners); + bmp_imported_bgps_init(&bt->imported_bgps); QOBJ_REG(bt, bmp_targets); bmp_targets_add(&bt->bmpbgp->targets, bt); return bt; } +static void bmp_imported_bgp_free(struct bmp_imported_bgp *bib) +{ + if (bib->name) + XFREE(MTYPE_BMP_IMPORTED_BGP, bib->name); + XFREE(MTYPE_BMP_IMPORTED_BGP, bib); +} + static void bmp_targets_put(struct bmp_targets *bt) { struct bmp *bmp; struct bmp_active *ba; + struct bmp_imported_bgp *bib; EVENT_OFF(bt->t_stats); @@ -2143,6 +2374,10 @@ static void bmp_targets_put(struct bmp_targets *bt) bmp_targets_del(&bt->bmpbgp->targets, bt); QOBJ_UNREG(bt); + frr_each_safe (bmp_imported_bgps, &bt->imported_bgps, bib) + bmp_imported_bgp_free(bib); + + bmp_imported_bgps_fini(&bt->imported_bgps); bmp_listeners_fini(&bt->listeners); bmp_actives_fini(&bt->actives); bmp_qhash_fini(&bt->updhash); @@ -2187,6 +2422,71 @@ static struct bmp_listener *bmp_listener_get(struct bmp_targets *bt, return bl; } +static struct bmp_imported_bgp *bmp_imported_bgp_find(struct bmp_targets *bt, char *name) +{ + struct bmp_imported_bgp dummy; + + dummy.name = name; + return bmp_imported_bgps_find(&bt->imported_bgps, &dummy); +} + +static void bmp_send_all_bgp(struct peer *peer, bool down) +{ + struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp); + struct bgp *bgp_vrf; + struct listnode *node; + struct stream *s = NULL; + struct bmp_targets *bt; + + s = bmp_peerstate(peer, down); + if (!s) + return; + + if (bmpbgp) { + frr_each (bmp_targets, &bmpbgp->targets, bt) + bmp_send_bt(bt, s); + } + for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp_vrf)) { + bmpbgp = bmp_bgp_find(bgp_vrf); + if (!bmpbgp) + continue; + frr_each (bmp_targets, &bmpbgp->targets, bt) { + if (bgp_vrf == peer->bgp || !bmp_imported_bgp_find(bt, peer->bgp->name)) + continue; + bmp_send_bt(bt, s); + } + } + stream_free(s); +} + +static void bmp_imported_bgp_put(struct bmp_targets *bt, struct bmp_imported_bgp *bib) +{ + bmp_imported_bgps_del(&bt->imported_bgps, bib); + bmp_imported_bgp_free(bib); +} + +static struct bmp_imported_bgp *bmp_imported_bgp_get(struct bmp_targets *bt, char *name) +{ + struct bmp_imported_bgp *bib = 
bmp_imported_bgp_find(bt, name); + afi_t afi; + safi_t safi; + + if (bib) + return bib; + + bib = XCALLOC(MTYPE_BMP_IMPORTED_BGP, sizeof(*bib)); + if (name) + bib->name = XSTRDUP(MTYPE_BMP_IMPORTED_BGP, name); + bib->vrf_state = vrf_state_unknown; + FOREACH_AFI_SAFI (afi, safi) + bib->bgp_request_sync[afi][safi] = false; + + bib->targets = bt; + bmp_imported_bgps_add(&bt->imported_bgps, bib); + + return bib; +} + static void bmp_listener_start(struct bmp_listener *bl) { int sock, ret; @@ -2547,6 +2847,63 @@ DEFPY(no_bmp_targets_main, return CMD_SUCCESS; } +DEFPY(bmp_import_vrf, + bmp_import_vrf_cmd, + "[no] bmp import-vrf-view VRFNAME$vrfname", + NO_STR + BMP_STR + "Import BMP information from another VRF\n" + "Specify the VRF or view instance name\n") +{ + VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt); + struct bmp_imported_bgp *bib; + struct bgp *bgp; + struct bmp *bmp; + afi_t afi; + safi_t safi; + + if (!bt->bgp) { + vty_out(vty, "%% BMP target, BGP instance not found\n"); + return CMD_WARNING; + } + if ((bt->bgp->name == NULL && vrfname == NULL) || + (bt->bgp->name && vrfname && strmatch(vrfname, bt->bgp->name))) { + vty_out(vty, "%% BMP target, can not import our own BGP instance\n"); + return CMD_WARNING; + } + if (no) { + bib = bmp_imported_bgp_find(bt, (char *)vrfname); + if (!bib) { + vty_out(vty, "%% BMP imported BGP instance not found\n"); + return CMD_WARNING; + } + bgp = bgp_lookup_by_name(bib->name); + if (!bgp) + return CMD_WARNING; + bmp_send_peerdown_vrf_per_instance(bt, bgp); + bmp_imported_bgp_put(bt, bib); + return CMD_SUCCESS; + } + bib = bmp_imported_bgp_find(bt, (char *)vrfname); + if (bib) + return CMD_SUCCESS; + + bib = bmp_imported_bgp_get(bt, (char *)vrfname); + bgp = bgp_lookup_by_name(bib->name); + if (!bgp) + return CMD_SUCCESS; + + frr_each (bmp_session, &bt->sessions, bmp) { + if (bmp->state != BMP_PeerUp && bmp->state != BMP_Run) + continue; + bmp_send_peerup_per_instance(bmp, bgp); + bmp_send_peerup_vrf_per_instance(bmp, &bib->vrf_state, bgp); + FOREACH_AFI_SAFI (afi, safi) + bmp_update_syncro(bmp, afi, safi, bgp); + } + return CMD_SUCCESS; +} + DEFPY(bmp_listener_main, bmp_listener_cmd, "bmp listener <X:X::X:X|A.B.C.D> port (1-65535)", @@ -2749,19 +3106,8 @@ DEFPY(bmp_monitor_cfg, bmp_monitor_cmd, if (prev == bt->afimon[afi][safi]) return CMD_SUCCESS; - frr_each (bmp_session, &bt->sessions, bmp) { - if (bmp->syncafi == afi && bmp->syncsafi == safi) { - bmp->syncafi = AFI_MAX; - bmp->syncsafi = SAFI_MAX; - } - - if (!bt->afimon[afi][safi]) { - bmp->afistate[afi][safi] = BMP_AFI_INACTIVE; - continue; - } - - bmp->afistate[afi][safi] = BMP_AFI_NEEDSYNC; - } + frr_each (bmp_session, &bt->sessions, bmp) + bmp_update_syncro(bmp, afi, safi, NULL); return CMD_SUCCESS; } @@ -2984,6 +3330,7 @@ static int bmp_config_write(struct bgp *bgp, struct vty *vty) struct bmp_targets *bt; struct bmp_listener *bl; struct bmp_active *ba; + struct bmp_imported_bgp *bib; afi_t afi; safi_t safi; @@ -3026,6 +3373,11 @@ static int bmp_config_write(struct bgp *bgp, struct vty *vty) vty_out(vty, " bmp monitor %s %s loc-rib\n", afi2str_lower(afi), safi2str(safi)); } + + frr_each (bmp_imported_bgps, &bt->imported_bgps, bib) + vty_out(vty, " bmp import-vrf-view %s\n", + bib->name ? 
bib->name : VRF_DEFAULT_NAME); + frr_each (bmp_listeners, &bt->listeners, bl) vty_out(vty, " bmp listener %pSU port %d\n", &bl->addr, bl->port); @@ -3063,6 +3415,7 @@ static int bgp_bmp_init(struct event_loop *tm) install_element(BMP_NODE, &bmp_stats_cmd); install_element(BMP_NODE, &bmp_monitor_cmd); install_element(BMP_NODE, &bmp_mirror_cmd); + install_element(BMP_NODE, &bmp_import_vrf_cmd); install_element(BGP_NODE, &bmp_mirror_limit_cmd); install_element(BGP_NODE, &no_bmp_mirror_limit_cmd); @@ -3078,11 +3431,14 @@ static int bmp_route_update(struct bgp *bgp, afi_t afi, safi_t safi, struct bgp_path_info *old_route, struct bgp_path_info *new_route) { - bool is_locribmon_enabled = false; bool is_withdraw = old_route && !new_route; struct bgp_path_info *updated_route = is_withdraw ? old_route : new_route; - + struct bmp_bgp *bmpbgp; + struct bmp_targets *bt; + int ret = 0; + struct bgp *bgp_vrf; + struct listnode *node; /* this should never happen */ if (!updated_route) { @@ -3090,20 +3446,30 @@ static int bmp_route_update(struct bgp *bgp, afi_t afi, safi_t safi, return 0; } - struct bmp_bgp *bmpbgp = bmp_bgp_get(bgp); - struct peer *peer = updated_route->peer; - struct bmp_targets *bt; - struct bmp *bmp; - - frr_each (bmp_targets, &bmpbgp->targets, bt) { - if (CHECK_FLAG(bt->afimon[afi][safi], BMP_MON_LOC_RIB)) { - is_locribmon_enabled = true; - break; + for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp_vrf)) { + bmpbgp = bmp_bgp_find(bgp_vrf); + if (!bmpbgp) + continue; + frr_each (bmp_targets, &bmpbgp->targets, bt) { + if (!CHECK_FLAG(bt->afimon[afi][safi], BMP_MON_LOC_RIB)) + continue; + if (bgp_vrf != bgp && !bmp_imported_bgp_find(bt, bgp->name)) + continue; + ret = bmp_route_update_bgpbmp(bt, afi, safi, bn, old_route, new_route); } } + return ret; +} - if (!is_locribmon_enabled) - return 0; +static int bmp_route_update_bgpbmp(struct bmp_targets *bt, afi_t afi, safi_t safi, + struct bgp_dest *bn, struct bgp_path_info *old_route, + struct bgp_path_info *new_route) +{ + bool is_withdraw = old_route && !new_route; + struct bgp_path_info *updated_route = is_withdraw ? 
old_route : new_route; + struct peer *peer = updated_route->peer; + struct bmp *bmp; + struct bmp_queue_entry *last_item; /* route is not installed in locrib anymore and rib uptime was saved */ if (old_route && old_route->extra) @@ -3117,26 +3483,20 @@ static int bmp_route_update(struct bgp *bgp, afi_t afi, safi_t safi, bgp_path_info_extra_get(new_route)->bgp_rib_uptime = monotime(NULL); - frr_each (bmp_targets, &bmpbgp->targets, bt) { - if (CHECK_FLAG(bt->afimon[afi][safi], BMP_MON_LOC_RIB)) { - - struct bmp_queue_entry *last_item = bmp_process_one( - bt, &bt->locupdhash, &bt->locupdlist, bgp, afi, - safi, bn, peer); + last_item = bmp_process_one(bt, &bt->locupdhash, &bt->locupdlist, bt->bgp, afi, safi, bn, + peer); - /* if bmp_process_one returns NULL - * we don't have anything to do next - */ - if (!last_item) - continue; + /* if bmp_process_one returns NULL + * we don't have anything to do next + */ + if (!last_item) + return 0; - frr_each (bmp_session, &bt->sessions, bmp) { - if (!bmp->locrib_queuepos) - bmp->locrib_queuepos = last_item; + frr_each (bmp_session, &bt->sessions, bmp) { + if (!bmp->locrib_queuepos) + bmp->locrib_queuepos = last_item; - pullwr_bump(bmp->pullwr); - }; - } + pullwr_bump(bmp->pullwr); }; return 0; @@ -3149,19 +3509,150 @@ static int bgp_bmp_early_fini(void) return 0; } -/* called when a bgp instance goes up/down, implying that the underlying VRF - * has been created or deleted in zebra - */ -static int bmp_vrf_state_changed(struct bgp *bgp) +static int bmp_bgp_attribute_updated_instance(struct bmp_targets *bt, enum bmp_vrf_state *vrf_state, + struct bgp *bgp, bool withdraw, struct stream *s) +{ + bmp_bgp_update_vrf_status(vrf_state, bgp, vrf_state_unknown); + if (*vrf_state == vrf_state_down) + /* do not send peer events, router id will not be enough to set state to up + */ + return 0; + + /* vrf_state is up: trigger a peer event + */ + bmp_send_bt(bt, s); + return 1; +} + +/* called when the routerid of an instance changes */ +static int bmp_bgp_attribute_updated(struct bgp *bgp, bool withdraw) { struct bmp_bgp *bmpbgp = bmp_bgp_find(bgp); + struct bgp *bgp_vrf; + struct bmp_targets *bt; + struct listnode *node; + struct bmp_imported_bgp *bib; + int ret = 0; + struct stream *s = bmp_peerstate(bgp->peer_self, withdraw); + struct bmp *bmp; + afi_t afi; + safi_t safi; - if (!bmp_bgp_update_vrf_status(bmpbgp, vrf_state_unknown)) - return 1; + if (!s) + return 0; - bmp_send_all_safe(bmpbgp, - bmp_peerstate(bgp->peer_self, bmpbgp->vrf_state == vrf_state_down)); + if (bmpbgp) { + frr_each (bmp_targets, &bmpbgp->targets, bt) { + ret = bmp_bgp_attribute_updated_instance(bt, &bmpbgp->vrf_state, bgp, + withdraw, s); + if (withdraw) + continue; + frr_each (bmp_session, &bt->sessions, bmp) { + bmp_send_peerup_per_instance(bmp, bgp); + FOREACH_AFI_SAFI (afi, safi) + bmp_update_syncro(bmp, afi, safi, bgp); + } + } + } + for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp_vrf)) { + if (bgp == bgp_vrf) + continue; + bmpbgp = bmp_bgp_find(bgp_vrf); + if (!bmpbgp) + continue; + frr_each (bmp_targets, &bmpbgp->targets, bt) { + frr_each (bmp_imported_bgps, &bt->imported_bgps, bib) { + if (bgp_lookup_by_name(bib->name) != bgp) + continue; + ret += bmp_bgp_attribute_updated_instance(bt, &bib->vrf_state, bgp, + withdraw, s); + if (withdraw) + continue; + frr_each (bmp_session, &bt->sessions, bmp) { + bmp_send_peerup_per_instance(bmp, bgp); + FOREACH_AFI_SAFI (afi, safi) { + bmp_update_syncro(bmp, afi, safi, bgp); + } + } + } + } + } + stream_free(s); + return 1; +} + +static int 
bmp_routerid_update(struct bgp *bgp, bool withdraw) +{ + return bmp_bgp_attribute_updated(bgp, withdraw); +} + +static int bmp_route_distinguisher_update(struct bgp *bgp, afi_t afi, bool preconfig) +{ + return bmp_bgp_attribute_updated(bgp, preconfig); +} + +static void _bmp_vrf_state_changed_internal(struct bgp *bgp, enum bmp_vrf_state vrf_state) +{ + struct bmp_bgp *bmpbgp = bmp_bgp_find(bgp); + struct bgp *bgp_vrf; + struct bmp_targets *bt; + struct listnode *node; + struct bmp_imported_bgp *bib; + struct bmp *bmp; + afi_t afi; + safi_t safi; + + if (bmpbgp && bmp_bgp_update_vrf_status(&bmpbgp->vrf_state, bgp, vrf_state)) { + bmp_send_all_safe(bmpbgp, bmp_peerstate(bgp->peer_self, + bmpbgp->vrf_state == vrf_state_down)); + if (vrf_state == vrf_state_up && bmpbgp->vrf_state == vrf_state_up) { + frr_each (bmp_targets, &bmpbgp->targets, bt) { + frr_each (bmp_session, &bt->sessions, bmp) { + bmp_send_peerup_per_instance(bmp, bgp); + FOREACH_AFI_SAFI (afi, safi) + bmp_update_syncro(bmp, afi, safi, bgp); + } + } + } + } + + for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp_vrf)) { + bmpbgp = bmp_bgp_find(bgp_vrf); + if (!bmpbgp) + continue; + if (bgp_vrf == bgp) + continue; + frr_each (bmp_targets, &bmpbgp->targets, bt) { + frr_each (bmp_imported_bgps, &bt->imported_bgps, bib) { + if (bgp_lookup_by_name(bib->name) != bgp) + continue; + if (bmp_bgp_update_vrf_status(&bib->vrf_state, bgp, vrf_state)) { + bmp_send_bt_safe(bt, bmp_peerstate(bgp->peer_self, + bib->vrf_state == + vrf_state_down)); + if (vrf_state == vrf_state_up && + bib->vrf_state == vrf_state_up) { + frr_each (bmp_session, &bt->sessions, bmp) { + bmp_send_peerup_per_instance(bmp, bgp); + FOREACH_AFI_SAFI (afi, safi) + bmp_update_syncro(bmp, afi, safi, + bgp); + } + } + break; + } + } + } + } +} + +/* called when a bgp instance goes up/down, implying that the underlying VRF + * has been created or deleted in zebra + */ +static int bmp_vrf_state_changed(struct bgp *bgp) +{ + _bmp_vrf_state_changed_internal(bgp, vrf_state_unknown); return 0; } @@ -3170,7 +3661,6 @@ static int bmp_vrf_state_changed(struct bgp *bgp) */ static int bmp_vrf_itf_state_changed(struct bgp *bgp, struct interface *itf) { - struct bmp_bgp *bmpbgp; enum bmp_vrf_state new_state; /* if the update is not about the vrf device double-check @@ -3179,10 +3669,8 @@ static int bmp_vrf_itf_state_changed(struct bgp *bgp, struct interface *itf) if (!itf || !if_is_vrf(itf)) return bmp_vrf_state_changed(bgp); - bmpbgp = bmp_bgp_find(bgp); new_state = if_is_up(itf) ? 
vrf_state_up : vrf_state_down; - if (bmp_bgp_update_vrf_status(bmpbgp, new_state)) - bmp_send_all(bmpbgp, bmp_peerstate(bgp->peer_self, new_state == vrf_state_down)); + _bmp_vrf_state_changed_internal(bgp, new_state); return 0; } @@ -3194,6 +3682,7 @@ static int bgp_bmp_module_init(void) hook_register(peer_status_changed, bmp_peer_status_changed); hook_register(peer_backward_transition, bmp_peer_backward); hook_register(bgp_process, bmp_process); + hook_register(bgp_nht_path_update, bmp_nht_path_valid); hook_register(bgp_inst_config_write, bmp_config_write); hook_register(bgp_inst_delete, bmp_bgp_del); hook_register(frr_late_init, bgp_bmp_init); @@ -3201,6 +3690,8 @@ static int bgp_bmp_module_init(void) hook_register(frr_early_fini, bgp_bmp_early_fini); hook_register(bgp_instance_state, bmp_vrf_state_changed); hook_register(bgp_vrf_status_changed, bmp_vrf_itf_state_changed); + hook_register(bgp_routerid_update, bmp_routerid_update); + hook_register(bgp_route_distinguisher_update, bmp_route_distinguisher_update); return 0; } diff --git a/bgpd/bgp_bmp.h b/bgpd/bgp_bmp.h index d45a4278f6..d81b8f9b05 100644 --- a/bgpd/bgp_bmp.h +++ b/bgpd/bgp_bmp.h @@ -92,7 +92,7 @@ struct bmp_mirrorq { uint8_t data[0]; }; -enum { +enum bmp_afi_state { BMP_AFI_INACTIVE = 0, BMP_AFI_NEEDSYNC, BMP_AFI_SYNC, @@ -148,6 +148,7 @@ struct bmp { uint64_t syncpeerid; afi_t syncafi; safi_t syncsafi; + struct bgp *sync_bgp; }; /* config & state for an active outbound connection. When the connection @@ -195,6 +196,9 @@ struct bmp_listener { int sock; }; +/* config for imported bgp instances */ +PREDECL_SORTLIST_UNIQ(bmp_imported_bgps); + /* bmp_targets - plural since it may contain multiple bmp_listener & * bmp_active items. If they have the same config, BMP session should be * put in the same targets since that's a bit more effective. 
@@ -206,6 +210,7 @@ struct bmp_targets { struct bmp_bgp *bmpbgp; struct bgp *bgp; + bool bgp_request_sync[AFI_MAX][SAFI_MAX]; char *name; struct bmp_listeners_head listeners; @@ -238,6 +243,8 @@ struct bmp_targets { struct bmp_qhash_head locupdhash; struct bmp_qlist_head locupdlist; + struct bmp_imported_bgps_head imported_bgps; + uint64_t cnt_accept, cnt_aclrefused; bool stats_send_experimental; @@ -274,6 +281,14 @@ enum bmp_vrf_state { vrf_state_up = 1, }; +struct bmp_imported_bgp { + struct bmp_imported_bgps_item bib; + struct bmp_targets *targets; + char *name; + enum bmp_vrf_state vrf_state; + bool bgp_request_sync[AFI_MAX][SAFI_MAX]; +}; + struct bmp_bgp { struct bmp_bgph_item bbi; @@ -289,7 +304,8 @@ struct bmp_bgp { size_t mirror_qsizelimit; }; -extern bool bmp_bgp_update_vrf_status(struct bmp_bgp *bmpbgp, enum bmp_vrf_state force); +extern bool bmp_bgp_update_vrf_status(enum bmp_vrf_state *vrf_state, struct bgp *bgp, + enum bmp_vrf_state force); enum { /* RFC7854 - 10.8 */ diff --git a/bgpd/bgp_debug.c b/bgpd/bgp_debug.c index 097d3684f6..319638e412 100644 --- a/bgpd/bgp_debug.c +++ b/bgpd/bgp_debug.c @@ -60,6 +60,7 @@ unsigned long conf_bgp_debug_graceful_restart; unsigned long conf_bgp_debug_evpn_mh; unsigned long conf_bgp_debug_bfd; unsigned long conf_bgp_debug_cond_adv; +unsigned long conf_bgp_debug_aggregate; unsigned long term_bgp_debug_as4; unsigned long term_bgp_debug_neighbor_events; @@ -80,6 +81,7 @@ unsigned long term_bgp_debug_graceful_restart; unsigned long term_bgp_debug_evpn_mh; unsigned long term_bgp_debug_bfd; unsigned long term_bgp_debug_cond_adv; +unsigned long term_bgp_debug_aggregate; struct list *bgp_debug_neighbor_events_peers = NULL; struct list *bgp_debug_keepalive_peers = NULL; @@ -88,6 +90,7 @@ struct list *bgp_debug_update_in_peers = NULL; struct list *bgp_debug_update_prefixes = NULL; struct list *bgp_debug_bestpath_prefixes = NULL; struct list *bgp_debug_zebra_prefixes = NULL; +struct list *bgp_debug_aggregate_prefixes; /* messages for BGP-4 status */ const struct message bgp_status_msg[] = {{Idle, "Idle"}, @@ -1812,6 +1815,107 @@ DEFPY (no_debug_bgp_zebra_prefix, return CMD_SUCCESS; } +/* debug bgp aggregate */ +DEFPY (debug_bgp_aggregate, + debug_bgp_aggregate_cmd, + "debug bgp aggregate", + DEBUG_STR + BGP_STR + "BGP aggregate\n") +{ + if (vty->node == CONFIG_NODE) + DEBUG_ON(aggregate, AGGREGATE); + else { + TERM_DEBUG_ON(aggregate, AGGREGATE); + vty_out(vty, "BGP aggregate debugging is on\n"); + } + return CMD_SUCCESS; +} + +DEFPY (no_debug_bgp_aggregate, + no_debug_bgp_aggregate_cmd, + "no debug bgp aggregate", + NO_STR + DEBUG_STR + BGP_STR + "BGP aggregate\n") +{ + bgp_debug_list_free(bgp_debug_aggregate_prefixes); + + if (vty->node == CONFIG_NODE) + DEBUG_OFF(aggregate, AGGREGATE); + else { + TERM_DEBUG_OFF(aggregate, AGGREGATE); + vty_out(vty, "BGP aggregate debugging is off\n"); + } + return CMD_SUCCESS; +} + +DEFPY (debug_bgp_aggregate_prefix, + debug_bgp_aggregate_prefix_cmd, + "debug bgp aggregate prefix <A.B.C.D/M|X:X::X:X/M>$prefix", + DEBUG_STR + BGP_STR + "BGP aggregate\n" + "Specify a prefix to debug\n" + "IPv4 prefix\n" + "IPv6 prefix\n") +{ + if (!bgp_debug_aggregate_prefixes) + bgp_debug_aggregate_prefixes = list_new(); + + if (bgp_debug_list_has_entry(bgp_debug_aggregate_prefixes, NULL, prefix, NULL)) { + vty_out(vty, "BGP aggregate debugging is already enabled for %s\n", prefix_str); + return CMD_SUCCESS; + } + + bgp_debug_list_add_entry(bgp_debug_aggregate_prefixes, NULL, prefix, NULL); + + if (vty->node == CONFIG_NODE) + 
DEBUG_ON(aggregate, AGGREGATE); + else { + TERM_DEBUG_ON(aggregate, AGGREGATE); + vty_out(vty, "BGP aggregate debugging is on for %s\n", prefix_str); + } + + return CMD_SUCCESS; +} + +DEFPY (no_debug_bgp_aggregate_prefix, + no_debug_bgp_aggregate_prefix_cmd, + "no debug bgp aggregate prefix <A.B.C.D/M|X:X::X:X/M>$prefix", + NO_STR + DEBUG_STR + BGP_STR + "BGP aggregate\n" + "Specify a prefix to debug\n" + "IPv4 prefix\n" + "IPv6 prefix\n") +{ + bool found_prefix = false; + + if (bgp_debug_aggregate_prefixes && !list_isempty(bgp_debug_aggregate_prefixes)) { + found_prefix = bgp_debug_list_remove_entry(bgp_debug_aggregate_prefixes, NULL, + (struct prefix *)prefix); + + if (list_isempty(bgp_debug_aggregate_prefixes)) { + if (vty->node == CONFIG_NODE) + DEBUG_OFF(aggregate, AGGREGATE); + else { + TERM_DEBUG_OFF(aggregate, AGGREGATE); + vty_out(vty, "BGP aggregate debugging is off\n"); + } + } + } + + if (found_prefix) + vty_out(vty, "BGP aggregate debugging is off for %s\n", prefix_str); + else + vty_out(vty, "BGP aggregate debugging was not enabled for %s\n", prefix_str); + + return CMD_SUCCESS; +} + /* debug bgp update-groups */ DEFUN (debug_bgp_update_groups, debug_bgp_update_groups_cmd, @@ -2239,6 +2343,10 @@ DEFUN_NOSH (show_debugging_bgp, bgp_debug_list_print(vty, " BGP zebra debugging is on", bgp_debug_zebra_prefixes); + if (BGP_DEBUG(aggregate, AGGREGATE)) + bgp_debug_list_print(vty, " BGP aggregate debugging is on", + bgp_debug_aggregate_prefixes); + if (BGP_DEBUG(graceful_restart, GRACEFUL_RESTART)) vty_out(vty, " BGP graceful-restart debugging is on\n"); @@ -2412,6 +2520,16 @@ static int bgp_config_write_debug(struct vty *vty) write++; } + if (CONF_BGP_DEBUG(aggregate, AGGREGATE)) { + if (!bgp_debug_aggregate_prefixes || list_isempty(bgp_debug_aggregate_prefixes)) { + vty_out(vty, "debug bgp aggregate\n"); + write++; + } else { + write += bgp_debug_list_conf_print(vty, "debug bgp aggregate prefix", + bgp_debug_aggregate_prefixes); + } + } + if (hook_call(bgp_hook_config_write_debug, vty, true)) write++; @@ -2485,6 +2603,16 @@ void bgp_debug_init(void) install_element(ENABLE_NODE, &no_debug_bgp_zebra_prefix_cmd); install_element(CONFIG_NODE, &no_debug_bgp_zebra_prefix_cmd); + /* debug bgp aggregate prefix A.B.C.D/M */ + install_element(ENABLE_NODE, &debug_bgp_aggregate_cmd); + install_element(CONFIG_NODE, &debug_bgp_aggregate_cmd); + install_element(ENABLE_NODE, &no_debug_bgp_aggregate_cmd); + install_element(CONFIG_NODE, &no_debug_bgp_aggregate_cmd); + install_element(ENABLE_NODE, &debug_bgp_aggregate_prefix_cmd); + install_element(CONFIG_NODE, &debug_bgp_aggregate_prefix_cmd); + install_element(ENABLE_NODE, &no_debug_bgp_aggregate_prefix_cmd); + install_element(CONFIG_NODE, &no_debug_bgp_aggregate_prefix_cmd); + install_element(ENABLE_NODE, &no_debug_bgp_as4_cmd); install_element(CONFIG_NODE, &no_debug_bgp_as4_cmd); install_element(ENABLE_NODE, &no_debug_bgp_as4_segment_cmd); @@ -2714,6 +2842,17 @@ bool bgp_debug_zebra(const struct prefix *p) return false; } +bool bgp_debug_aggregate(const struct prefix *p) +{ + if (BGP_DEBUG(aggregate, AGGREGATE)) { + if (bgp_debug_per_prefix(p, term_bgp_debug_aggregate, BGP_DEBUG_AGGREGATE, + bgp_debug_aggregate_prefixes)) + return true; + } + + return false; +} + const char *bgp_debug_rdpfxpath2str(afi_t afi, safi_t safi, const struct prefix_rd *prd, union prefixconstptr pu, diff --git a/bgpd/bgp_debug.h b/bgpd/bgp_debug.h index 061d966dc3..11b5e52098 100644 --- a/bgpd/bgp_debug.h +++ b/bgpd/bgp_debug.h @@ -71,6 +71,7 @@ extern unsigned long 
conf_bgp_debug_graceful_restart; extern unsigned long conf_bgp_debug_evpn_mh; extern unsigned long conf_bgp_debug_bfd; extern unsigned long conf_bgp_debug_cond_adv; +extern unsigned long conf_bgp_debug_aggregate; extern unsigned long term_bgp_debug_as4; extern unsigned long term_bgp_debug_neighbor_events; @@ -89,6 +90,7 @@ extern unsigned long term_bgp_debug_graceful_restart; extern unsigned long term_bgp_debug_evpn_mh; extern unsigned long term_bgp_debug_bfd; extern unsigned long term_bgp_debug_cond_adv; +extern unsigned long term_bgp_debug_aggregate; extern struct list *bgp_debug_neighbor_events_peers; extern struct list *bgp_debug_keepalive_peers; @@ -97,6 +99,7 @@ extern struct list *bgp_debug_update_out_peers; extern struct list *bgp_debug_update_prefixes; extern struct list *bgp_debug_bestpath_prefixes; extern struct list *bgp_debug_zebra_prefixes; +extern struct list *bgp_debug_aggregate_prefixes; struct bgp_debug_filter { char *host; @@ -135,6 +138,7 @@ struct bgp_debug_filter { #define BGP_DEBUG_BFD_LIB 0x01 #define BGP_DEBUG_COND_ADV 0x01 +#define BGP_DEBUG_AGGREGATE 0x01 #define CONF_DEBUG_ON(a, b) (conf_bgp_debug_ ## a |= (BGP_DEBUG_ ## b)) #define CONF_DEBUG_OFF(a, b) (conf_bgp_debug_ ## a &= ~(BGP_DEBUG_ ## b)) @@ -172,6 +176,7 @@ extern bool bgp_debug_update(const struct peer *peer, const struct prefix *p, struct update_group *updgrp, unsigned int inbound); extern bool bgp_debug_bestpath(struct bgp_dest *dest); extern bool bgp_debug_zebra(const struct prefix *p); +extern bool bgp_debug_aggregate(const struct prefix *p); extern const char *bgp_debug_rdpfxpath2str( afi_t afi, safi_t safi, const struct prefix_rd *prd, diff --git a/bgpd/bgp_dump.c b/bgpd/bgp_dump.c index 53b5212482..e71835d1cf 100644 --- a/bgpd/bgp_dump.c +++ b/bgpd/bgp_dump.c @@ -480,8 +480,8 @@ static void bgp_dump_common(struct stream *obuf, struct peer *peer, stream_put(obuf, &peer->connection->su.sin.sin_addr, IPV4_MAX_BYTELEN); - if (peer->su_local) - stream_put(obuf, &peer->su_local->sin.sin_addr, + if (peer->connection->su_local) + stream_put(obuf, &peer->connection->su_local->sin.sin_addr, IPV4_MAX_BYTELEN); else stream_put(obuf, empty, IPV4_MAX_BYTELEN); @@ -494,8 +494,8 @@ static void bgp_dump_common(struct stream *obuf, struct peer *peer, stream_put(obuf, &peer->connection->su.sin6.sin6_addr, IPV6_MAX_BYTELEN); - if (peer->su_local) - stream_put(obuf, &peer->su_local->sin6.sin6_addr, + if (peer->connection->su_local) + stream_put(obuf, &peer->connection->su_local->sin6.sin6_addr, IPV6_MAX_BYTELEN); else stream_put(obuf, empty, IPV6_MAX_BYTELEN); diff --git a/bgpd/bgp_evpn_mh.c b/bgpd/bgp_evpn_mh.c index ad3625242e..b6ec8341a7 100644 --- a/bgpd/bgp_evpn_mh.c +++ b/bgpd/bgp_evpn_mh.c @@ -4737,7 +4737,7 @@ bgp_evpn_path_nh_info_new(struct bgp_path_info *pi) struct bgp_path_mh_info *mh_info; struct bgp_path_evpn_nh_info *nh_info; - e = bgp_path_info_extra_get(pi); + e = bgp_evpn_path_info_extra_get(pi); /* If mh_info doesn't exist allocate it */ mh_info = e->evpn->mh_info; diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c index d9dfc4c5eb..dc6e0d33c2 100644 --- a/bgpd/bgp_evpn_vty.c +++ b/bgpd/bgp_evpn_vty.c @@ -3108,6 +3108,7 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type, afi_t afi; safi_t safi; uint32_t prefix_cnt, path_cnt; + int first = true; afi = AFI_L2VPN; safi = SAFI_EVPN; @@ -3132,8 +3133,15 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type, prefix_rd2str((struct prefix_rd *)rd_destp, rd_str, sizeof(rd_str), bgp->asnotation); - 
if (json) + if (json) { + if (first) { + vty_out(vty, "\"%s\":", rd_str); + first = false; + } else { + vty_out(vty, ",\"%s\":", rd_str); + } json_rd = json_object_new_object(); + } rd_header = 1; @@ -3247,18 +3255,18 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type, } if (json) { - if (add_rd_to_json) - json_object_object_add(json, rd_str, json_rd); - else { + if (add_rd_to_json) { + vty_json_no_pretty(vty, json_rd); + } else { + vty_out(vty, "{}"); json_object_free(json_rd); - json_rd = NULL; } } } if (json) { - json_object_int_add(json, "numPrefix", prefix_cnt); - json_object_int_add(json, "numPaths", path_cnt); + vty_out(vty, ",\"numPrefix\":%u", prefix_cnt); + vty_out(vty, ",\"numPaths\":%u", path_cnt); } else { if (prefix_cnt == 0) { vty_out(vty, "No EVPN prefixes %sexist\n", @@ -3276,20 +3284,18 @@ int bgp_evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type, { json_object *json = NULL; - if (use_json) + if (use_json) { json = json_object_new_object(); + vty_out(vty, "{\n"); + } evpn_show_all_routes(vty, bgp, type, json, detail, false); - if (use_json) - /* - * We are using no_pretty here because under extremely high - * settings (lots of routes with many different paths) this can - * save several minutes of output when FRR is run on older cpu's - * or more underperforming routers out there. So for route - * scale, we need to use no_pretty json. - */ - vty_json_no_pretty(vty, json); + if (use_json) { + vty_out(vty, "}\n"); + json_object_free(json); + } + return CMD_SUCCESS; } @@ -4940,8 +4946,10 @@ DEFUN(show_bgp_l2vpn_evpn_route, if (!bgp) return CMD_WARNING; - if (uj) + if (uj) { json = json_object_new_object(); + vty_out(vty, "{\n"); + } if (bgp_evpn_cli_parse_type(&type, argv, argc) < 0) return CMD_WARNING; @@ -4954,13 +4962,10 @@ DEFUN(show_bgp_l2vpn_evpn_route, evpn_show_all_routes(vty, bgp, type, json, detail, self_orig); - /* - * This is an extremely expensive operation at scale - * and as such we need to save as much time as is - * possible. 
- */ - if (uj) - vty_json_no_pretty(vty, json); + if (uj) { + vty_out(vty, "}\n"); + json_object_free(json); + } return CMD_SUCCESS; } @@ -5017,10 +5022,20 @@ DEFUN(show_bgp_l2vpn_evpn_route_rd, if (bgp_evpn_cli_parse_type(&type, argv, argc) < 0) return CMD_WARNING; - if (rd_all) + if (rd_all) { + if (uj) + vty_out(vty, "{\n"); + evpn_show_all_routes(vty, bgp, type, json, 1, false); - else + + if (uj) { + vty_out(vty, "}\n"); + json_object_free(json); + return CMD_SUCCESS; + } + } else { evpn_show_route_rd(vty, bgp, &prd, type, json); + } if (uj) vty_json(vty, json); diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index 3d02214ca9..1a30cb37f4 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -1722,8 +1722,8 @@ bgp_connect_success(struct peer_connection *connection) if (bgp_debug_neighbor_events(peer)) { if (!CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER)) - zlog_debug("%s open active, local address %pSU", - peer->host, peer->su_local); + zlog_debug("%s open active, local address %pSU", peer->host, + connection->su_local); else zlog_debug("%s passive open", peer->host); } @@ -1768,8 +1768,8 @@ bgp_connect_success_w_delayopen(struct peer_connection *connection) if (bgp_debug_neighbor_events(peer)) { if (!CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER)) - zlog_debug("%s open active, local address %pSU", - peer->host, peer->su_local); + zlog_debug("%s open active, local address %pSU", peer->host, + connection->su_local); else zlog_debug("%s passive open", peer->host); } @@ -1819,14 +1819,13 @@ static void bgp_connect_in_progress_update_connection(struct peer_connection *co { struct peer *peer = connection->peer; - bgp_updatesockname(peer, connection); - if (!peer->su_remote && !BGP_CONNECTION_SU_UNSPEC(peer->connection)) { + if (!connection->su_remote && !BGP_CONNECTION_SU_UNSPEC(connection)) { /* if connect initiated, then dest port and dest addresses are well known */ - peer->su_remote = sockunion_dup(&connection->su); - if (sockunion_family(peer->su_remote) == AF_INET) - peer->su_remote->sin.sin_port = htons(peer->port); - else if (sockunion_family(peer->su_remote) == AF_INET6) - peer->su_remote->sin6.sin6_port = htons(peer->port); + connection->su_remote = sockunion_dup(&connection->su); + if (sockunion_family(connection->su_remote) == AF_INET) + connection->su_remote->sin.sin_port = htons(peer->port); + else if (sockunion_family(connection->su_remote) == AF_INET6) + connection->su_remote->sin6.sin6_port = htons(peer->port); } } @@ -2164,6 +2163,9 @@ bgp_establish(struct peer_connection *connection) peer->established++; bgp_fsm_change_status(connection, Established); + if (peer->last_reset == PEER_DOWN_WAITING_OPEN) + peer->last_reset = 0; + /* bgp log-neighbor-changes of neighbor Up */ if (CHECK_FLAG(peer->bgp->flags, BGP_FLAG_LOG_NEIGHBOR_CHANGES)) { struct vrf *vrf = vrf_lookup_by_id(peer->bgp->vrf_id); diff --git a/bgpd/bgp_io.c b/bgpd/bgp_io.c index 9e9251c854..5d0f14cc5c 100644 --- a/bgpd/bgp_io.c +++ b/bgpd/bgp_io.c @@ -199,7 +199,7 @@ static int read_ibuf_work(struct peer_connection *connection) assert(ringbuf_get(ibw, pkt->data, pktsize) == pktsize); stream_set_endp(pkt, pktsize); - frrtrace(2, frr_bgp, packet_read, connection->peer, pkt); + frrtrace(2, frr_bgp, packet_read, connection, pkt); frr_with_mutex (&connection->io_mtx) { stream_fifo_push(connection->ibuf, pkt); } diff --git a/bgpd/bgp_main.c b/bgpd/bgp_main.c index 9ca20c949a..9dbef791b0 100644 --- a/bgpd/bgp_main.c +++ b/bgpd/bgp_main.c @@ -327,7 +327,7 @@ static int bgp_vrf_disable(struct vrf *vrf) if 
(BGP_DEBUG(zebra, ZEBRA)) zlog_debug("VRF disable %s id %d", vrf->name, vrf->vrf_id); - bgp = bgp_lookup_by_name(vrf->name); + bgp = bgp_lookup_by_name_filter(vrf->name, false); if (bgp) { vpn_leak_zebra_vrf_label_withdraw(bgp, AFI_IP); diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c index b96c287f86..46e529f03d 100644 --- a/bgpd/bgp_mplsvpn.c +++ b/bgpd/bgp_mplsvpn.c @@ -1312,8 +1312,8 @@ leak_update(struct bgp *to_bgp, struct bgp_dest *bn, else bgp_path_info_unset_flag(bn, new, BGP_PATH_VALID); - bgp_aggregate_increment(to_bgp, p, new, afi, safi); bgp_path_info_add(bn, new); + bgp_aggregate_increment(to_bgp, p, new, afi, safi); bgp_process(to_bgp, bn, new, afi, safi); @@ -1951,7 +1951,7 @@ void vpn_leak_from_vrf_update(struct bgp *to_bgp, /* to */ * because of loop checking. */ if (new_info) - vpn_leak_to_vrf_update(from_bgp, new_info, NULL); + vpn_leak_to_vrf_update(from_bgp, new_info, NULL, path_vrf->peer); else bgp_dest_unlock_node(bn); } @@ -2143,10 +2143,10 @@ static struct bgp *bgp_lookup_by_rd(struct bgp_path_info *bpi, return NULL; } -static void vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */ +static void vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */ struct bgp *from_bgp, /* from */ - struct bgp_path_info *path_vpn, - struct prefix_rd *prd) + struct bgp_path_info *path_vpn, struct prefix_rd *prd, + struct peer *from) { const struct prefix *p = bgp_dest_get_prefix(path_vpn->net); afi_t afi = family2afi(p->family); @@ -2231,6 +2231,12 @@ static void vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */ /* Check if leaked route has our asn. If so, don't import it. */ if (CHECK_FLAG(peer->af_flags[afi][SAFI_MPLS_VPN], PEER_FLAG_ALLOWAS_IN)) aspath_loop_count = peer->allowas_in[afi][SAFI_MPLS_VPN]; + else if (peer == peer->bgp->peer_self && from) + /* If this is an import from one VRF to another and the source + * VRF's peer has allowas-in applied, respect it. 
+ */ + aspath_loop_count = from->allowas_in[afi][SAFI_UNICAST]; + if (aspath_loop_check(path_vpn->attr->aspath, to_bgp->as) > aspath_loop_count) { for (bpi = bgp_dest_get_bgp_path_info(bn); bpi; bpi = bpi->next) { @@ -2511,9 +2517,8 @@ bool vpn_leak_to_vrf_no_retain_filter_check(struct bgp *from_bgp, return true; } -void vpn_leak_to_vrf_update(struct bgp *from_bgp, - struct bgp_path_info *path_vpn, - struct prefix_rd *prd) +void vpn_leak_to_vrf_update(struct bgp *from_bgp, struct bgp_path_info *path_vpn, + struct prefix_rd *prd, struct peer *peer) { struct listnode *mnode, *mnnode; struct bgp *bgp; @@ -2528,8 +2533,7 @@ void vpn_leak_to_vrf_update(struct bgp *from_bgp, for (ALL_LIST_ELEMENTS(bm->bgp, mnode, mnnode, bgp)) { if (!path_vpn->extra || !path_vpn->extra->vrfleak || path_vpn->extra->vrfleak->bgp_orig != bgp) { /* no loop */ - vpn_leak_to_vrf_update_onevrf(bgp, from_bgp, path_vpn, - prd); + vpn_leak_to_vrf_update_onevrf(bgp, from_bgp, path_vpn, prd, peer); } } } @@ -2728,8 +2732,8 @@ void vpn_leak_to_vrf_update_all(struct bgp *to_bgp, struct bgp *vpn_from, bpi->extra->vrfleak->bgp_orig == to_bgp) continue; - vpn_leak_to_vrf_update_onevrf(to_bgp, vpn_from, - bpi, NULL); + vpn_leak_to_vrf_update_onevrf(to_bgp, vpn_from, bpi, NULL, + bpi->peer); } } } diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h index 18639fc69b..56dd33f9b1 100644 --- a/bgpd/bgp_mplsvpn.h +++ b/bgpd/bgp_mplsvpn.h @@ -67,9 +67,8 @@ extern bool vpn_leak_to_vrf_no_retain_filter_check(struct bgp *from_bgp, struct attr *attr, afi_t afi); -extern void vpn_leak_to_vrf_update(struct bgp *from_bgp, - struct bgp_path_info *path_vpn, - struct prefix_rd *prd); +extern void vpn_leak_to_vrf_update(struct bgp *from_bgp, struct bgp_path_info *path_vpn, + struct prefix_rd *prd, struct peer *peer); extern void vpn_leak_to_vrf_withdraw(struct bgp_path_info *path_vpn); diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c index f1bea1c189..af5d815d30 100644 --- a/bgpd/bgp_network.c +++ b/bgpd/bgp_network.c @@ -484,6 +484,9 @@ static void bgp_accept(struct event *thread) /* Dynamic neighbor has been created, let it proceed */ connection1->fd = bgp_sock; + connection1->su_local = sockunion_getsockname(connection1->fd); + connection1->su_remote = sockunion_dup(&su); + if (bgp_set_socket_ttl(connection1) < 0) { peer1->last_reset = PEER_DOWN_SOCKET_ERROR; zlog_err("%s: Unable to set min/max TTL on peer %s (dynamic), error received: %s(%d)", @@ -623,7 +626,10 @@ static void bgp_accept(struct event *thread) peer->doppelganger = peer1; peer1->doppelganger = peer; + connection->fd = bgp_sock; + connection->su_local = sockunion_getsockname(connection->fd); + connection->su_remote = sockunion_dup(&su); if (bgp_set_socket_ttl(connection) < 0) if (bgp_debug_neighbor_events(peer)) @@ -857,24 +863,28 @@ enum connect_result bgp_connect(struct peer_connection *connection) peer->host, connection->fd); /* Connect to the remote peer. 
*/ - return sockunion_connect(connection->fd, &connection->su, - htons(peer->port), ifindex); -} + enum connect_result res; -void bgp_updatesockname(struct peer *peer, struct peer_connection *connection) -{ - if (peer->su_local) { - sockunion_free(peer->su_local); - peer->su_local = NULL; - } + res = sockunion_connect(connection->fd, &connection->su, htons(peer->port), ifindex); - if (peer->su_remote) { - sockunion_free(peer->su_remote); - peer->su_remote = NULL; + if (connection->su_remote) + sockunion_free(connection->su_remote); + + connection->su_remote = sockunion_dup(&connection->su); + switch (connection->su.sa.sa_family) { + case AF_INET: + connection->su_remote->sin.sin_port = htons(peer->port); + break; + case AF_INET6: + connection->su_remote->sin6.sin6_port = htons(peer->port); + break; } - peer->su_local = sockunion_getsockname(connection->fd); - peer->su_remote = sockunion_getpeername(connection->fd); + if (connection->su_local) + sockunion_free(connection->su_local); + connection->su_local = sockunion_getsockname(connection->fd); + + return res; } /* After TCP connection is established. Get local address and port. */ @@ -882,17 +892,13 @@ int bgp_getsockname(struct peer_connection *connection) { struct peer *peer = connection->peer; - bgp_updatesockname(peer, peer->connection); - - if (!bgp_zebra_nexthop_set(peer->su_local, peer->su_remote, - &peer->nexthop, peer)) { - flog_err( - EC_BGP_NH_UPD, - "%s: nexthop_set failed, local: %pSUp remote: %pSUp update_if: %s resetting connection - intf %s", - peer->host, peer->su_local, peer->su_remote, - peer->update_if ? peer->update_if : "(None)", - peer->nexthop.ifp ? peer->nexthop.ifp->name - : "(Unknown)"); + if (!bgp_zebra_nexthop_set(connection->su_local, connection->su_remote, &peer->nexthop, + peer)) { + flog_err(EC_BGP_NH_UPD, + "%s: nexthop_set failed, local: %pSUp remote: %pSUp update_if: %s resetting connection - intf %s", + peer->host, connection->su_local, connection->su_remote, + peer->update_if ? peer->update_if : "(None)", + peer->nexthop.ifp ? peer->nexthop.ifp->name : "(Unknown)"); return -1; } return 0; diff --git a/bgpd/bgp_network.h b/bgpd/bgp_network.h index ed1a72ec89..a2f4851f1b 100644 --- a/bgpd/bgp_network.h +++ b/bgpd/bgp_network.h @@ -23,7 +23,6 @@ extern void bgp_close_vrf_socket(struct bgp *bgp); extern void bgp_close(void); extern enum connect_result bgp_connect(struct peer_connection *connection); extern int bgp_getsockname(struct peer_connection *connection); -extern void bgp_updatesockname(struct peer *peer, struct peer_connection *connection); extern int bgp_md5_set_prefix(struct bgp *bgp, struct prefix *p, const char *password); diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c index 5fda5701f3..ed689c8bac 100644 --- a/bgpd/bgp_nexthop.c +++ b/bgpd/bgp_nexthop.c @@ -869,6 +869,9 @@ static void bgp_show_nexthops_detail(struct vty *vty, struct bgp *bgp, ? bnc->ifindex_ipv6_ll : nexthop->ifindex, bgp->vrf_id)); + json_object_int_add(json_gate, "ifindex", + bnc->ifindex_ipv6_ll ? bnc->ifindex_ipv6_ll + : nexthop->ifindex); break; case NEXTHOP_TYPE_IPV4: json_object_string_addf(json_gate, "ip", "%pI4", @@ -882,6 +885,9 @@ static void bgp_show_nexthops_detail(struct vty *vty, struct bgp *bgp, ? bnc->ifindex_ipv6_ll : nexthop->ifindex, bgp->vrf_id)); + json_object_int_add(json_gate, "ifindex", + bnc->ifindex_ipv6_ll ? 
bnc->ifindex_ipv6_ll + : nexthop->ifindex); break; case NEXTHOP_TYPE_IPV4_IFINDEX: json_object_string_addf(json_gate, "ip", "%pI4", @@ -893,6 +899,9 @@ static void bgp_show_nexthops_detail(struct vty *vty, struct bgp *bgp, ? bnc->ifindex_ipv6_ll : nexthop->ifindex, bgp->vrf_id)); + json_object_int_add(json_gate, "ifindex", + bnc->ifindex_ipv6_ll ? bnc->ifindex_ipv6_ll + : nexthop->ifindex); break; case NEXTHOP_TYPE_BLACKHOLE: json_object_boolean_true_add(json_gate, @@ -926,13 +935,13 @@ static void bgp_show_nexthops_detail(struct vty *vty, struct bgp *bgp, vty_out(vty, " gate %pI6", &nexthop->gate.ipv6); if (nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX && bnc->ifindex_ipv6_ll) - vty_out(vty, ", if %s\n", - ifindex2ifname(bnc->ifindex_ipv6_ll, - bgp->vrf_id)); + vty_out(vty, ", if %s, ifindex %d\n", + ifindex2ifname(bnc->ifindex_ipv6_ll, bgp->vrf_id), + bnc->ifindex_ipv6_ll); else if (nexthop->ifindex) - vty_out(vty, ", if %s\n", - ifindex2ifname(nexthop->ifindex, - bgp->vrf_id)); + vty_out(vty, ", if %s, ifindex %d\n", + ifindex2ifname(nexthop->ifindex, bgp->vrf_id), + nexthop->ifindex); else vty_out(vty, "\n"); break; @@ -941,22 +950,22 @@ static void bgp_show_nexthops_detail(struct vty *vty, struct bgp *bgp, vty_out(vty, " gate %pI4", &nexthop->gate.ipv4); if (nexthop->type == NEXTHOP_TYPE_IPV4_IFINDEX && bnc->ifindex_ipv6_ll) - vty_out(vty, ", if %s\n", - ifindex2ifname(bnc->ifindex_ipv6_ll, - bgp->vrf_id)); + vty_out(vty, ", if %s, ifindex %d\n", + ifindex2ifname(bnc->ifindex_ipv6_ll, bgp->vrf_id), + bnc->ifindex_ipv6_ll); else if (nexthop->ifindex) - vty_out(vty, ", if %s\n", - ifindex2ifname(nexthop->ifindex, - bgp->vrf_id)); + vty_out(vty, ", if %s, ifindex %d\n", + ifindex2ifname(nexthop->ifindex, bgp->vrf_id), + nexthop->ifindex); else vty_out(vty, "\n"); break; case NEXTHOP_TYPE_IFINDEX: - vty_out(vty, " if %s\n", - ifindex2ifname(bnc->ifindex_ipv6_ll - ? bnc->ifindex_ipv6_ll - : nexthop->ifindex, - bgp->vrf_id)); + vty_out(vty, " if %s, ifindex %d\n", + ifindex2ifname(bnc->ifindex_ipv6_ll ? bnc->ifindex_ipv6_ll + : nexthop->ifindex, + bgp->vrf_id), + bnc->ifindex_ipv6_ll ? 
bnc->ifindex_ipv6_ll : nexthop->ifindex); break; case NEXTHOP_TYPE_BLACKHOLE: vty_out(vty, " blackhole\n"); @@ -970,9 +979,8 @@ static void bgp_show_nexthops_detail(struct vty *vty, struct bgp *bgp, json_object_object_add(json, "nexthops", json_gates); } -static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp, - struct bgp_nexthop_cache *bnc, bool specific, - json_object *json) +static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp, struct bgp_nexthop_cache *bnc, + bool detail, bool uj) { char buf[PREFIX2STR_BUFFER]; time_t tbuf; @@ -983,10 +991,10 @@ static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp, peer = (struct peer *)bnc->nht_info; - if (json) + if (uj) json_nexthop = json_object_new_object(); if (bnc->srte_color) { - if (json) + if (uj) json_object_int_add(json_nexthop, "srteColor", bnc->srte_color); else @@ -994,7 +1002,7 @@ static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp, } inet_ntop(bnc->prefix.family, &bnc->prefix.u.prefix, buf, sizeof(buf)); if (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_VALID)) { - if (json) { + if (uj) { json_object_boolean_true_add(json_nexthop, "valid"); json_object_boolean_true_add(json_nexthop, "complete"); json_object_int_add(json_nexthop, "igpMetric", @@ -1022,7 +1030,7 @@ static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp, } bgp_show_nexthops_detail(vty, bgp, bnc, json_nexthop); } else if (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_EVPN_INCOMPLETE)) { - if (json) { + if (uj) { json_object_boolean_true_add(json_nexthop, "valid"); json_object_boolean_false_add(json_nexthop, "complete"); json_object_int_add(json_nexthop, "igpMetric", @@ -1042,7 +1050,7 @@ static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp, } bgp_show_nexthops_detail(vty, bgp, bnc, json_nexthop); } else { - if (json) { + if (uj) { json_object_boolean_false_add(json_nexthop, "valid"); json_object_boolean_false_add(json_nexthop, "complete"); json_object_int_add(json_nexthop, "pathCount", @@ -1074,8 +1082,8 @@ static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp, } } tbuf = time(NULL) - (monotime(NULL) - bnc->last_update); - if (json) { - if (!specific) { + if (uj) { + if (detail) { json_last_update = json_object_new_object(); json_object_int_add(json_last_update, "epoch", tbuf); json_object_string_add(json_last_update, "string", @@ -1090,22 +1098,25 @@ static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp, } /* show paths dependent on nexthop, if needed. */ - if (specific) + if (detail) bgp_show_nexthop_paths(vty, bgp, bnc, json_nexthop); - if (json) - json_object_object_add(json, buf, json_nexthop); + + if (uj) { + vty_out(vty, "\"%s\":", buf); + vty_json_no_pretty(vty, json_nexthop); + } } -static void bgp_show_nexthops(struct vty *vty, struct bgp *bgp, - bool import_table, json_object *json, afi_t afi, - bool detail) +static void bgp_show_nexthops(struct vty *vty, struct bgp *bgp, bool import_table, bool uj, + afi_t afi, bool detail) { struct bgp_nexthop_cache *bnc; struct bgp_nexthop_cache_head(*tree)[AFI_MAX]; - json_object *json_afi = NULL; bool found = false; + bool firstafi = true; + bool firstnh = true; - if (!json) { + if (!uj) { if (import_table) vty_out(vty, "Current BGP import check cache:\n"); else @@ -1117,34 +1128,42 @@ static void bgp_show_nexthops(struct vty *vty, struct bgp *bgp, tree = &bgp->nexthop_cache_table; if (afi == AFI_IP || afi == AFI_IP6) { - if (json) - json_afi = json_object_new_object(); + if (uj) + vty_out(vty, "%s:{", (afi == AFI_IP) ? 
"\"ipv4\"" : "\"ipv6\""); frr_each (bgp_nexthop_cache, &(*tree)[afi], bnc) { - bgp_show_nexthop(vty, bgp, bnc, detail, json_afi); + if (uj) + vty_out(vty, "%s", firstnh ? "" : ","); + bgp_show_nexthop(vty, bgp, bnc, detail, uj); found = true; + firstnh = false; } - if (found && json) - json_object_object_add( - json, (afi == AFI_IP) ? "ipv4" : "ipv6", - json_afi); + if (found && uj) + vty_out(vty, "}"); return; } for (afi = AFI_IP; afi < AFI_MAX; afi++) { - if (json && (afi == AFI_IP || afi == AFI_IP6)) - json_afi = json_object_new_object(); - frr_each (bgp_nexthop_cache, &(*tree)[afi], bnc) - bgp_show_nexthop(vty, bgp, bnc, detail, json_afi); - if (json && (afi == AFI_IP || afi == AFI_IP6)) - json_object_object_add( - json, (afi == AFI_IP) ? "ipv4" : "ipv6", - json_afi); + if (afi != AFI_IP && afi != AFI_IP6) + continue; + if (uj) + vty_out(vty, "%s%s:{", firstafi ? "" : ",", + (afi == AFI_IP) ? "\"ipv4\"" : "\"ipv6\""); + firstafi = false; + firstnh = true; + frr_each (bgp_nexthop_cache, &(*tree)[afi], bnc) { + if (uj) + vty_out(vty, "%s", firstnh ? "" : ","); + bgp_show_nexthop(vty, bgp, bnc, detail, uj); + firstnh = false; + } + + if (uj) + vty_out(vty, "}"); } } -static int show_ip_bgp_nexthop_table(struct vty *vty, const char *name, - const char *nhopip_str, bool import_table, - json_object *json, afi_t afi, bool detail) +static int show_ip_bgp_nexthop_table(struct vty *vty, const char *name, const char *nhopip_str, + bool import_table, bool uj, afi_t afi, bool detail) { struct bgp *bgp; @@ -1153,7 +1172,7 @@ static int show_ip_bgp_nexthop_table(struct vty *vty, const char *name, else bgp = bgp_get_default(); if (!bgp) { - if (!json) + if (!uj) vty_out(vty, "%% No such BGP instance exist\n"); return CMD_WARNING; } @@ -1163,61 +1182,57 @@ static int show_ip_bgp_nexthop_table(struct vty *vty, const char *name, struct bgp_nexthop_cache_head (*tree)[AFI_MAX]; struct bgp_nexthop_cache *bnc; bool found = false; - json_object *json_afi = NULL; if (!str2prefix(nhopip_str, &nhop)) { - if (!json) + if (!uj) vty_out(vty, "nexthop address is malformed\n"); return CMD_WARNING; } tree = import_table ? &bgp->import_check_table : &bgp->nexthop_cache_table; - if (json) - json_afi = json_object_new_object(); + if (uj) + vty_out(vty, "%s:{", + (family2afi(nhop.family) == AFI_IP) ? "\"ipv4\"" : "\"ipv6\""); frr_each (bgp_nexthop_cache, &(*tree)[family2afi(nhop.family)], bnc) { if (prefix_cmp(&bnc->prefix, &nhop)) continue; - bgp_show_nexthop(vty, bgp, bnc, true, json_afi); + bgp_show_nexthop(vty, bgp, bnc, true, uj); found = true; } - if (json) - json_object_object_add( - json, - (family2afi(nhop.family) == AFI_IP) ? "ipv4" - : "ipv6", - json_afi); - if (!found && !json) + if (!found && !uj) vty_out(vty, "nexthop %s does not have entry\n", nhopip_str); + + if (uj) + vty_out(vty, "}"); } else - bgp_show_nexthops(vty, bgp, import_table, json, afi, detail); + bgp_show_nexthops(vty, bgp, import_table, uj, afi, detail); return CMD_SUCCESS; } -static void bgp_show_all_instances_nexthops_vty(struct vty *vty, - json_object *json, afi_t afi, - bool detail) +static void bgp_show_all_instances_nexthops_vty(struct vty *vty, bool uj, afi_t afi, bool detail) { struct listnode *node, *nnode; struct bgp *bgp; const char *inst_name; - json_object *json_instance = NULL; + bool firstinst = true; for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) { inst_name = (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT) ? 
VRF_DEFAULT_NAME : bgp->name; - if (json) - json_instance = json_object_new_object(); + if (uj) + vty_out(vty, "%s\"%s\":{", firstinst ? "" : ",", inst_name); + else vty_out(vty, "\nInstance %s:\n", inst_name); - bgp_show_nexthops(vty, bgp, false, json_instance, afi, detail); - - if (json) - json_object_object_add(json, inst_name, json_instance); + bgp_show_nexthops(vty, bgp, false, uj, afi, detail); + firstinst = false; + if (uj) + vty_out(vty, "}"); } } @@ -1241,20 +1256,18 @@ DEFPY (show_ip_bgp_nexthop, JSON_STR) { int rc = 0; - json_object *json = NULL; afi_t afiz = AFI_UNSPEC; if (uj) - json = json_object_new_object(); + vty_out(vty, "{\n"); if (afi) afiz = bgp_vty_afi_from_str(afi); - rc = show_ip_bgp_nexthop_table(vty, vrf, nhop_str, false, json, afiz, - detail); + rc = show_ip_bgp_nexthop_table(vty, vrf, nhop_str, false, uj, afiz, detail); if (uj) - vty_json(vty, json); + vty_out(vty, "}\n"); return rc; } @@ -1271,16 +1284,14 @@ DEFPY (show_ip_bgp_import_check, JSON_STR) { int rc = 0; - json_object *json = NULL; if (uj) - json = json_object_new_object(); + vty_out(vty, "{\n"); - rc = show_ip_bgp_nexthop_table(vty, vrf, NULL, true, json, AFI_UNSPEC, - detail); + rc = show_ip_bgp_nexthop_table(vty, vrf, NULL, true, uj, AFI_UNSPEC, detail); if (uj) - vty_json(vty, json); + vty_out(vty, "}\n"); return rc; } @@ -1298,19 +1309,18 @@ DEFPY (show_ip_bgp_instance_all_nexthop, "Show detailed information\n" JSON_STR) { - json_object *json = NULL; afi_t afiz = AFI_UNSPEC; if (uj) - json = json_object_new_object(); + vty_out(vty, "{"); if (afi) afiz = bgp_vty_afi_from_str(afi); - bgp_show_all_instances_nexthops_vty(vty, json, afiz, detail); + bgp_show_all_instances_nexthops_vty(vty, uj, afiz, detail); if (uj) - vty_json(vty, json); + vty_out(vty, "}"); return CMD_SUCCESS; } diff --git a/bgpd/bgp_nexthop.h b/bgpd/bgp_nexthop.h index 6a4a02dcc8..5679c215b1 100644 --- a/bgpd/bgp_nexthop.h +++ b/bgpd/bgp_nexthop.h @@ -82,7 +82,7 @@ struct bgp_nexthop_cache { * L3 unreachable | VALID = 0 | VALID = 0 * | INCOMPLETE = 0 | INCOMPLETE = 0 */ -#define BGP_NEXTHOP_EVPN_INCOMPLETE (1 << 7) +#define BGP_NEXTHOP_EVPN_INCOMPLETE (1 << 8) uint32_t srte_color; diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c index ed83757ea3..2ef7ec97e3 100644 --- a/bgpd/bgp_nht.c +++ b/bgpd/bgp_nht.c @@ -41,6 +41,9 @@ static void unregister_zebra_rnh(struct bgp_nexthop_cache *bnc); static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p); static void bgp_nht_ifp_initial(struct event *thread); +DEFINE_HOOK(bgp_nht_path_update, (struct bgp *bgp, struct bgp_path_info *pi, bool valid), + (bgp, pi, valid)); + static int bgp_isvalid_nexthop(struct bgp_nexthop_cache *bnc) { return (bgp_zebra_num_connects() == 0 @@ -1275,6 +1278,25 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc) } LIST_FOREACH (path, &(bnc->paths), nh_thread) { + /* + * Currently when a peer goes down, bgp immediately + * sees this via the interface events( if it is directly + * connected). And in this case it takes and puts on + * a special peer queue all path info's associated with + * but these items are not yet processed typically when + * the nexthop is being handled here. Thus we end + * up in a situation where the process Queue for BGP + * is being asked to look at the same path info multiple + * times. 
Let's just cut to the chase here and if + * the bnc has a peer associated with it and the path info + * being looked at uses that peer and the peer is no + * longer established we know the path_info is being + * handled elsewhere and we do not need to process + * it here at all since the pathinfo is going away + */ + if (peer && path->peer == peer && !peer_established(peer->connection)) + continue; + if (path->type == ZEBRA_ROUTE_BGP && (path->sub_type == BGP_ROUTE_NORMAL || path->sub_type == BGP_ROUTE_STATIC || @@ -1449,6 +1471,9 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc) } } + if (path_valid != bnc_is_valid_nexthop) + hook_call(bgp_nht_path_update, bgp_path, path, bnc_is_valid_nexthop); + bgp_process(bgp_path, dest, path, afi, safi); } diff --git a/bgpd/bgp_nht.h b/bgpd/bgp_nht.h index e7c6fdc281..345089ac5a 100644 --- a/bgpd/bgp_nht.h +++ b/bgpd/bgp_nht.h @@ -83,4 +83,9 @@ extern void bgp_nht_ifp_up(struct interface *ifp); extern void bgp_nht_ifp_down(struct interface *ifp); extern void bgp_nht_interface_events(struct peer *peer); + +/* called when a path becomes valid or invalid, because of nexthop tracking */ +DECLARE_HOOK(bgp_nht_path_update, (struct bgp *bgp, struct bgp_path_info *pi, bool valid), + (bgp, pi, valid)); + #endif /* _BGP_NHT_H */ diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c index c5e390b045..f8726ffff9 100644 --- a/bgpd/bgp_packet.c +++ b/bgpd/bgp_packet.c @@ -1218,7 +1218,6 @@ void bgp_capability_send(struct peer *peer, afi_t afi, safi_t safi, uint16_t len; uint32_t gr_restart_time; uint8_t addpath_afi_safi_count = 0; - bool adv_addpath_tx = false; unsigned long number_of_orfs_p; uint8_t number_of_orfs = 0; const char *capability = lookup_msg(capcode_str, capability_code, @@ -1226,6 +1225,9 @@ void bgp_capability_send(struct peer *peer, afi_t afi, safi_t safi, const char *hostname = cmd_hostname_get(); const char *domainname = cmd_domainname_get(); + if (!peer) + return; + if (!peer_established(peer->connection)) return; @@ -1383,87 +1385,6 @@ void bgp_capability_send(struct peer *peer, afi_t afi, safi_t safi, COND_FLAG(peer->cap, PEER_CAP_LLGR_ADV, action == CAPABILITY_ACTION_SET); break; - case CAPABILITY_CODE_ADDPATH: - FOREACH_AFI_SAFI (afi, safi) { - if (peer->afc[afi][safi]) { - addpath_afi_safi_count++; - - /* Only advertise addpath TX if a feature that - * will use it is - * configured */ - if (peer->addpath_type[afi][safi] != - BGP_ADDPATH_NONE) - adv_addpath_tx = true; - - /* If we have enabled labeled unicast, we MUST check - * against unicast SAFI because addpath IDs are - * allocated under unicast SAFI, the same as the RIB - * is managed in unicast SAFI. - */ - if (safi == SAFI_LABELED_UNICAST) - if (peer->addpath_type[afi][SAFI_UNICAST] != - BGP_ADDPATH_NONE) - adv_addpath_tx = true; - } - } - - stream_putc(s, action); - stream_putc(s, CAPABILITY_CODE_ADDPATH); - stream_putc(s, CAPABILITY_CODE_ADDPATH_LEN * - addpath_afi_safi_count); - - FOREACH_AFI_SAFI (afi, safi) { - if (peer->afc[afi][safi]) { - bool adv_addpath_rx = - !CHECK_FLAG(peer->af_flags[afi][safi], - PEER_FLAG_DISABLE_ADDPATH_RX); - uint8_t flags = 0; - - /* Convert AFI, SAFI to values for packet. 
*/ - bgp_map_afi_safi_int2iana(afi, safi, &pkt_afi, - &pkt_safi); - - stream_putw(s, pkt_afi); - stream_putc(s, pkt_safi); - - if (adv_addpath_rx) { - SET_FLAG(flags, BGP_ADDPATH_RX); - SET_FLAG(peer->af_cap[afi][safi], - PEER_CAP_ADDPATH_AF_RX_ADV); - } else { - UNSET_FLAG(peer->af_cap[afi][safi], - PEER_CAP_ADDPATH_AF_RX_ADV); - } - - if (adv_addpath_tx) { - SET_FLAG(flags, BGP_ADDPATH_TX); - SET_FLAG(peer->af_cap[afi][safi], - PEER_CAP_ADDPATH_AF_TX_ADV); - if (safi == SAFI_LABELED_UNICAST) - SET_FLAG(peer->af_cap[afi] - [SAFI_UNICAST], - PEER_CAP_ADDPATH_AF_TX_ADV); - } else { - UNSET_FLAG(peer->af_cap[afi][safi], - PEER_CAP_ADDPATH_AF_TX_ADV); - } - - stream_putc(s, flags); - } - } - - if (bgp_debug_neighbor_events(peer)) - zlog_debug("%pBP sending CAPABILITY has %s %s for afi/safi: %s/%s", - peer, - action == CAPABILITY_ACTION_SET - ? "Advertising" - : "Removing", - capability, iana_afi2str(pkt_afi), - iana_safi2str(pkt_safi)); - - COND_FLAG(peer->cap, PEER_CAP_ADDPATH_ADV, - action == CAPABILITY_ACTION_SET); - break; case CAPABILITY_CODE_PATHS_LIMIT: FOREACH_AFI_SAFI (afi, safi) { if (!peer->afc[afi][safi]) @@ -1619,10 +1540,40 @@ void bgp_capability_send(struct peer *peer, afi_t afi, safi_t safi, case CAPABILITY_CODE_REFRESH: case CAPABILITY_CODE_AS4: case CAPABILITY_CODE_DYNAMIC: + case CAPABILITY_CODE_ADDPATH: case CAPABILITY_CODE_ENHANCED_RR: - case CAPABILITY_CODE_ENHE: case CAPABILITY_CODE_EXT_MESSAGE: break; + case CAPABILITY_CODE_ENHE: + FOREACH_AFI_SAFI (afi, safi) { + if (!peer->afc[afi][safi]) + continue; + + bgp_map_afi_safi_int2iana(afi, safi, &pkt_afi, &pkt_safi); + + if (CHECK_FLAG(peer->flags, PEER_FLAG_CAPABILITY_ENHE) && + peer->connection->su.sa.sa_family == AF_INET6 && afi == AFI_IP && + (safi == SAFI_UNICAST || safi == SAFI_MPLS_VPN || + safi == SAFI_LABELED_UNICAST)) { + stream_putc(s, action); + stream_putc(s, CAPABILITY_CODE_ENHE); + stream_putc(s, CAPABILITY_CODE_ENHE_LEN); + stream_putw(s, pkt_afi); + stream_putw(s, pkt_safi); + stream_putw(s, afi_int2iana(AFI_IP6)); + + COND_FLAG(peer->af_cap[AFI_IP][safi], PEER_CAP_ENHE_AF_ADV, + action == CAPABILITY_ACTION_SET); + + if (CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ENHE_AF_RCV)) + COND_FLAG(peer->af_cap[afi][safi], PEER_CAP_ENHE_AF_NEGO, + action == CAPABILITY_ACTION_SET); + } + } + COND_FLAG(peer->cap, PEER_CAP_ENHE_ADV, action == CAPABILITY_ACTION_SET); + update_group_adjust_peer_afs(peer); + bgp_announce_route_all(peer); + break; case CAPABILITY_CODE_ROLE: stream_putc(s, action); stream_putc(s, CAPABILITY_CODE_ROLE); @@ -3145,102 +3096,6 @@ static int bgp_route_refresh_receive(struct peer_connection *connection, return BGP_PACKET_NOOP; } -static void bgp_dynamic_capability_addpath(uint8_t *pnt, int action, - struct capability_header *hdr, - struct peer *peer) -{ - uint8_t *data = pnt + 3; - uint8_t *end = data + hdr->length; - size_t len = end - data; - afi_t afi; - safi_t safi; - - if (action == CAPABILITY_ACTION_SET) { - if (len % CAPABILITY_CODE_ADDPATH_LEN) { - flog_warn(EC_BGP_CAPABILITY_INVALID_LENGTH, - "Add Path: Received invalid length %zu, non-multiple of 4", - len); - return; - } - - SET_FLAG(peer->cap, PEER_CAP_ADDPATH_RCV); - - while (data + CAPABILITY_CODE_ADDPATH_LEN <= end) { - afi_t afi; - safi_t safi; - iana_afi_t pkt_afi; - iana_safi_t pkt_safi; - struct bgp_addpath_capability bac; - - memcpy(&bac, data, sizeof(bac)); - pkt_afi = ntohs(bac.afi); - pkt_safi = safi_int2iana(bac.safi); - - /* If any other value (other than 1-3) is received, - * then the capability SHOULD be treated as 
not - * understood and ignored. - */ - if (!bac.flags || bac.flags > 3) { - flog_warn(EC_BGP_CAPABILITY_INVALID_LENGTH, - "Add Path: Received invalid send/receive value %u in Add Path capability", - bac.flags); - goto ignore; - } - - if (bgp_debug_neighbor_events(peer)) - zlog_debug("%s OPEN has %s capability for afi/safi: %s/%s%s%s", - peer->host, lookup_msg(capcode_str, hdr->code, NULL), - iana_afi2str(pkt_afi), iana_safi2str(pkt_safi), - CHECK_FLAG(bac.flags, BGP_ADDPATH_RX) ? ", receive" : "", - CHECK_FLAG(bac.flags, BGP_ADDPATH_TX) ? ", transmit" - : ""); - - if (bgp_map_afi_safi_iana2int(pkt_afi, pkt_safi, &afi, - &safi)) { - if (bgp_debug_neighbor_events(peer)) - zlog_debug("%s Addr-family %s/%s(afi/safi) not supported. Ignore the Addpath Attribute for this AFI/SAFI", - peer->host, - iana_afi2str(pkt_afi), - iana_safi2str(pkt_safi)); - goto ignore; - } else if (!peer->afc[afi][safi]) { - if (bgp_debug_neighbor_events(peer)) - zlog_debug("%s Addr-family %s/%s(afi/safi) not enabled. Ignore the AddPath capability for this AFI/SAFI", - peer->host, - iana_afi2str(pkt_afi), - iana_safi2str(pkt_safi)); - goto ignore; - } - - if (CHECK_FLAG(bac.flags, BGP_ADDPATH_RX)) - SET_FLAG(peer->af_cap[afi][safi], - PEER_CAP_ADDPATH_AF_RX_RCV); - else - UNSET_FLAG(peer->af_cap[afi][safi], - PEER_CAP_ADDPATH_AF_RX_RCV); - - if (CHECK_FLAG(bac.flags, BGP_ADDPATH_TX)) - SET_FLAG(peer->af_cap[afi][safi], - PEER_CAP_ADDPATH_AF_TX_RCV); - else - UNSET_FLAG(peer->af_cap[afi][safi], - PEER_CAP_ADDPATH_AF_TX_RCV); - -ignore: - data += CAPABILITY_CODE_ADDPATH_LEN; - } - } else { - FOREACH_AFI_SAFI (afi, safi) { - UNSET_FLAG(peer->af_cap[afi][safi], - PEER_CAP_ADDPATH_AF_RX_RCV); - UNSET_FLAG(peer->af_cap[afi][safi], - PEER_CAP_ADDPATH_AF_TX_RCV); - } - - UNSET_FLAG(peer->cap, PEER_CAP_ADDPATH_RCV); - } -} - static void bgp_dynamic_capability_paths_limit(uint8_t *pnt, int action, struct capability_header *hdr, struct peer *peer) @@ -3321,6 +3176,81 @@ ignore: } } +static void bgp_dynamic_capability_enhe(uint8_t *pnt, int action, struct capability_header *hdr, + struct peer *peer) +{ + uint8_t *data = pnt + 3; + uint8_t *end = data + hdr->length; + size_t len = end - data; + + if (data + CAPABILITY_CODE_ENHE_LEN > end) { + flog_warn(EC_BGP_CAPABILITY_INVALID_LENGTH, + "Extended NH: Received invalid length %zu, less than %d", len, + CAPABILITY_CODE_ENHE_LEN); + return; + } + + if (action == CAPABILITY_ACTION_SET) { + if (hdr->length % CAPABILITY_CODE_ENHE_LEN) { + flog_warn(EC_BGP_CAPABILITY_INVALID_LENGTH, + "Extended NH: Received invalid length %d, non-multiple of %d", + hdr->length, CAPABILITY_CODE_ENHE_LEN); + return; + } + + while (data + CAPABILITY_CODE_ENHE_LEN <= end) { + afi_t afi; + safi_t safi; + afi_t nh_afi; + struct bgp_enhe_capability bec = {}; + + memcpy(&bec, data, sizeof(bec)); + afi = ntohs(bec.afi); + safi = ntohs(bec.safi); + nh_afi = afi_iana2int(ntohs(bec.nh_afi)); + + /* RFC 5549 specifies use of this capability only for IPv4 AFI, + * with the Nexthop AFI being IPv6. A future spec may introduce + * other possibilities, so we ignore other values with a log. + * Also, only SAFI_UNICAST and SAFI_LABELED_UNICAST are currently + * supported (and expected). 
+ */ + if (afi != AFI_IP || nh_afi != AFI_IP6 || + !(safi == SAFI_UNICAST || safi == SAFI_MPLS_VPN || + safi == SAFI_LABELED_UNICAST)) { + flog_warn(EC_BGP_CAPABILITY_INVALID_DATA, + "%s Unexpected afi/safi/next-hop afi: %s/%s/%u in Extended Next-hop capability, ignoring", + peer->host, afi2str(afi), safi2str(safi), nh_afi); + goto ignore; + } + + SET_FLAG(peer->af_cap[afi][safi], PEER_CAP_ENHE_AF_RCV); + + if (CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ENHE_AF_ADV)) + SET_FLAG(peer->af_cap[afi][safi], PEER_CAP_ENHE_AF_NEGO); + +ignore: + data += CAPABILITY_CODE_ENHE_LEN; + } + + SET_FLAG(peer->cap, PEER_CAP_ENHE_RCV); + update_group_adjust_peer_afs(peer); + bgp_announce_route_all(peer); + } else { + afi_t afi; + safi_t safi; + + UNSET_FLAG(peer->cap, PEER_CAP_ENHE_RCV); + + FOREACH_AFI_SAFI (afi, safi) { + UNSET_FLAG(peer->af_cap[afi][safi], PEER_CAP_ENHE_AF_RCV); + + if (CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ENHE_AF_ADV)) + UNSET_FLAG(peer->af_cap[afi][safi], PEER_CAP_ENHE_AF_NEGO); + } + } +} + static void bgp_dynamic_capability_orf(uint8_t *pnt, int action, struct capability_header *hdr, struct peer *peer) @@ -3926,9 +3856,6 @@ static int bgp_capability_msg_parse(struct peer *peer, uint8_t *pnt, case CAPABILITY_CODE_LLGR: bgp_dynamic_capability_llgr(pnt, action, hdr, peer); break; - case CAPABILITY_CODE_ADDPATH: - bgp_dynamic_capability_addpath(pnt, action, hdr, peer); - break; case CAPABILITY_CODE_PATHS_LIMIT: bgp_dynamic_capability_paths_limit(pnt, action, hdr, peer); @@ -3942,10 +3869,13 @@ static int bgp_capability_msg_parse(struct peer *peer, uint8_t *pnt, case CAPABILITY_CODE_REFRESH: case CAPABILITY_CODE_AS4: case CAPABILITY_CODE_DYNAMIC: + case CAPABILITY_CODE_ADDPATH: case CAPABILITY_CODE_ENHANCED_RR: - case CAPABILITY_CODE_ENHE: case CAPABILITY_CODE_EXT_MESSAGE: break; + case CAPABILITY_CODE_ENHE: + bgp_dynamic_capability_enhe(pnt, action, hdr, peer); + break; case CAPABILITY_CODE_ROLE: bgp_dynamic_capability_role(pnt, action, peer); break; diff --git a/bgpd/bgp_packet.h b/bgpd/bgp_packet.h index c266b17266..866b8f617d 100644 --- a/bgpd/bgp_packet.h +++ b/bgpd/bgp_packet.h @@ -8,6 +8,12 @@ #include "hook.h" +struct bgp_enhe_capability { + uint16_t afi; + uint16_t safi; + uint16_t nh_afi; +}; + DECLARE_HOOK(bgp_packet_dump, (struct peer *peer, uint8_t type, bgp_size_t size, struct stream *s), diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index 0f899d9617..e932738cd4 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -328,6 +328,19 @@ struct bgp_path_info_extra *bgp_path_info_extra_get(struct bgp_path_info *pi) return pi->extra; } +/* Get bgp_path_info extra along with evpn information for the given bgp_path_info. + * This is used for EVPN imported routes like Type-5. + */ +struct bgp_path_info_extra *bgp_evpn_path_info_extra_get(struct bgp_path_info *pi) +{ + if (!pi->extra) + pi->extra = bgp_path_info_extra_new(); + if (!pi->extra->evpn) + pi->extra->evpn = XCALLOC(MTYPE_BGP_ROUTE_EXTRA_EVPN, + sizeof(struct bgp_path_info_extra_evpn)); + return pi->extra; +} + bool bgp_path_info_has_valid_label(const struct bgp_path_info *path) { if (!BGP_PATH_INFO_NUM_LABELS(path)) @@ -649,7 +662,7 @@ static bool use_bgp_med_value(struct attr *attr, struct bgp *bgp) /* Get MED value. If MED value is missing and "bgp bestpath missing-as-worst" is specified, treat it as the worst value. 
*/ -static uint32_t bgp_med_value(struct attr *attr, struct bgp *bgp) +uint32_t bgp_med_value(struct attr *attr, struct bgp *bgp) { if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC))) return attr->med; @@ -1576,17 +1589,17 @@ int bgp_path_info_cmp(struct bgp *bgp, struct bgp_path_info *new, } /* locally configured routes to advertise do not have su_remote */ - if (peer_new->su_remote == NULL) { + if (peer_new->connection->su_remote == NULL) { *reason = bgp_path_selection_local_configured; return 0; } - if (peer_exist->su_remote == NULL) { + if (peer_exist->connection->su_remote == NULL) { *reason = bgp_path_selection_local_configured; return 1; } - ret = sockunion_cmp(peer_new->su_remote, peer_exist->su_remote); + ret = sockunion_cmp(peer_new->connection->su_remote, peer_exist->connection->su_remote); if (ret == 1) { *reason = bgp_path_selection_neighbor_ip; @@ -2464,8 +2477,6 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi, * announced to an EBGP peer (and they have the same attributes barring * their nexthop). */ - if (ibgp_to_ibgp) - SET_FLAG(attr->rmap_change_flags, BATTR_REFLECTED); #define NEXTHOP_IS_V6 \ ((safi != SAFI_ENCAP && safi != SAFI_MPLS_VPN \ @@ -3874,6 +3885,12 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest, BGP_PATH_ATTR_CHANGED); UNSET_FLAG(new_select->flags, BGP_PATH_MULTIPATH_CHG); UNSET_FLAG(new_select->flags, BGP_PATH_LINK_BW_CHG); + } else { + /* + * Ensure that on uninstall that the INSTALL_PENDING + * is no longer set + */ + UNSET_FLAG(dest->flags, BGP_NODE_FIB_INSTALL_PENDING); } /* call bmp hook for loc-rib route update / withdraw after flags were @@ -4912,6 +4929,7 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, bool force_evpn_import = false; safi_t orig_safi = safi; struct bgp_labels bgp_labels = {}; + struct bgp_route_evpn *p_evpn = evpn; uint8_t i; if (frrtrace_enabled(frr_bgp, process_update)) { @@ -4953,11 +4971,9 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, * will not be interned. In which case, it is ok to update the * attr->evpn_overlay, so that, this can be stored in adj_in. */ - if (evpn) { - if (afi == AFI_L2VPN) - bgp_attr_set_evpn_overlay(attr, evpn); - else - evpn_overlay_free(evpn); + if (evpn && afi == AFI_L2VPN) { + bgp_attr_set_evpn_overlay(attr, evpn); + p_evpn = NULL; } bgp_adj_in_set(dest, peer, attr, addpath_id, &bgp_labels); } @@ -5130,11 +5146,9 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, * attr->evpn_overlay with evpn directly. Instead memcpy * evpn to new_atr.evpn_overlay before it is interned. */ - if (soft_reconfig && evpn) { - if (afi == AFI_L2VPN) - bgp_attr_set_evpn_overlay(&new_attr, evpn); - else - evpn_overlay_free(evpn); + if (soft_reconfig && evpn && afi == AFI_L2VPN) { + bgp_attr_set_evpn_overlay(&new_attr, evpn); + p_evpn = NULL; } /* Apply incoming route-map. 
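The p_evpn variable introduced in bgp_update() above implements a consume-or-free convention for the caller-supplied struct bgp_route_evpn: when the overlay is attached to the attribute (the AFI_L2VPN case) it must not be freed locally, otherwise it is released on every exit path, as the following hunks show. A condensed sketch of that convention, not part of the patch; handle_evpn_arg() is a hypothetical name, while bgp_attr_set_evpn_overlay() and evpn_overlay_free() are the helpers used in the hunks here.

/* Sketch only: consume-or-free handling of a caller-supplied evpn overlay. */
static void handle_evpn_arg(struct attr *attr, afi_t afi,
			    struct bgp_route_evpn *evpn)
{
	/* remember the pointer so every exit path knows whether to free it */
	struct bgp_route_evpn *p_evpn = evpn;

	if (evpn && afi == AFI_L2VPN) {
		/* stored with the attribute; must not be freed below */
		bgp_attr_set_evpn_overlay(attr, evpn);
		p_evpn = NULL;
	}

	/* ... regular update processing; all return paths end up here ... */

	if (p_evpn)
		evpn_overlay_free(p_evpn);
}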
@@ -5303,7 +5317,8 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, bgp_dest_unlock_node(dest); bgp_attr_unintern(&attr_new); - + if (p_evpn) + evpn_overlay_free(p_evpn); return; } @@ -5468,6 +5483,8 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, ret = bgp_damp_update(pi, dest, afi, safi); if (ret == BGP_DAMP_SUPPRESSED) { bgp_dest_unlock_node(dest); + if (p_evpn) + evpn_overlay_free(p_evpn); return; } } @@ -5539,7 +5556,7 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, } if ((SAFI_MPLS_VPN == safi) && (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT)) { - vpn_leak_to_vrf_update(bgp, pi, prd); + vpn_leak_to_vrf_update(bgp, pi, prd, peer); } #ifdef ENABLE_BGP_VNC @@ -5554,6 +5571,8 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, type, sub_type, NULL); } #endif + if (p_evpn) + evpn_overlay_free(p_evpn); return; } // End of implicit withdraw @@ -5592,12 +5611,12 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, /* Addpath ID */ new->addpath_rx_id = addpath_id; - /* Increment prefix */ - bgp_aggregate_increment(bgp, p, new, afi, safi); - /* Register new BGP information. */ bgp_path_info_add(dest, new); + /* Increment prefix */ + bgp_aggregate_increment(bgp, p, new, afi, safi); + /* route_node_get lock */ bgp_dest_unlock_node(dest); @@ -5633,7 +5652,7 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, } if ((SAFI_MPLS_VPN == safi) && (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT)) { - vpn_leak_to_vrf_update(bgp, new, prd); + vpn_leak_to_vrf_update(bgp, new, prd, peer); } #ifdef ENABLE_BGP_VNC if (SAFI_MPLS_VPN == safi) { @@ -5648,6 +5667,8 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, } #endif + if (p_evpn) + evpn_overlay_free(p_evpn); return; /* This BGP update is filtered. Log the reason then update BGP @@ -5711,6 +5732,8 @@ filtered: } #endif + if (p_evpn) + evpn_overlay_free(p_evpn); return; } @@ -7142,8 +7165,7 @@ void bgp_static_update(struct bgp *bgp, const struct prefix *p, if (SAFI_MPLS_VPN == safi && bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT) { - vpn_leak_to_vrf_update(bgp, pi, - &bgp_static->prd); + vpn_leak_to_vrf_update(bgp, pi, &bgp_static->prd, NULL); } #ifdef ENABLE_BGP_VNC if (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP || @@ -7187,12 +7209,12 @@ void bgp_static_update(struct bgp *bgp, const struct prefix *p, bgp_nexthop_reachability_check(afi, safi, new, p, dest, bgp, bgp); - /* Aggregate address increment. */ - bgp_aggregate_increment(bgp, p, new, afi, safi); - /* Register new BGP information. */ bgp_path_info_add(dest, new); + /* Aggregate address increment. 
*/ + bgp_aggregate_increment(bgp, p, new, afi, safi); + /* route_node_get lock */ bgp_dest_unlock_node(dest); @@ -7207,7 +7229,7 @@ void bgp_static_update(struct bgp *bgp, const struct prefix *p, if (SAFI_MPLS_VPN == safi && bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT) { - vpn_leak_to_vrf_update(bgp, new, &bgp_static->prd); + vpn_leak_to_vrf_update(bgp, new, &bgp_static->prd, NULL); } #ifdef ENABLE_BGP_VNC if (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP || safi == SAFI_EVPN) @@ -7934,44 +7956,6 @@ static bool aggr_unsuppress_path(struct bgp_aggregate *aggregate, return false; } -static bool bgp_aggregate_info_same(struct bgp_path_info *pi, uint8_t origin, - struct aspath *aspath, - struct community *comm, - struct ecommunity *ecomm, - struct lcommunity *lcomm) -{ - static struct aspath *ae = NULL; - enum asnotation_mode asnotation; - - asnotation = bgp_get_asnotation(NULL); - - if (!aspath) - ae = aspath_empty(asnotation); - - if (!pi) - return false; - - if (origin != pi->attr->origin) - return false; - - if (!aspath_cmp(pi->attr->aspath, (aspath) ? aspath : ae)) - return false; - - if (!community_cmp(bgp_attr_get_community(pi->attr), comm)) - return false; - - if (!ecommunity_cmp(bgp_attr_get_ecommunity(pi->attr), ecomm)) - return false; - - if (!lcommunity_cmp(bgp_attr_get_lcommunity(pi->attr), lcomm)) - return false; - - if (!CHECK_FLAG(pi->flags, BGP_PATH_VALID)) - return false; - - return true; -} - static void bgp_aggregate_install( struct bgp *bgp, afi_t afi, safi_t safi, const struct prefix *p, uint8_t origin, struct aspath *aspath, struct community *community, @@ -7980,14 +7964,18 @@ static void bgp_aggregate_install( { struct bgp_dest *dest; struct bgp_table *table; - struct bgp_path_info *pi, *orig, *new; + struct bgp_path_info *pi, *new; struct attr *attr; + bool debug = bgp_debug_aggregate(p); + + if (debug) + zlog_debug("%s: aggregate %pFX, count %lu", __func__, p, aggregate->count); table = bgp->rib[afi][safi]; dest = bgp_node_get(table, p); - for (orig = pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) + for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) if (pi->peer == bgp->peer_self && pi->type == ZEBRA_ROUTE_BGP && pi->sub_type == BGP_ROUTE_AGGREGATE) break; @@ -7996,28 +7984,40 @@ static void bgp_aggregate_install( * If we have paths with different MEDs, then don't install * (or uninstall) the aggregate route. */ - if (aggregate->match_med && aggregate->med_mismatched) + if (aggregate->match_med && aggregate->med_mismatched) { + aspath_free(aspath); + community_free(&community); + ecommunity_free(&ecommunity); + lcommunity_free(&lcommunity); + if (debug) + zlog_debug(" aggregate %pFX: med mismatch", p); goto uninstall_aggregate_route; + } if (aggregate->count > 0) { /* * If the aggregate information has not changed * no need to re-install it again. 
*/ - if (pi && (!aggregate->rmap.changed && - bgp_aggregate_info_same(pi, origin, aspath, community, - ecommunity, lcommunity))) { + attr = bgp_attr_aggregate_intern(bgp, origin, aspath, community, ecommunity, + lcommunity, aggregate, atomic_aggregate, p); + if (!attr) { + aspath_free(aspath); + community_free(&community); + ecommunity_free(&ecommunity); + lcommunity_free(&lcommunity); bgp_dest_unlock_node(dest); + bgp_aggregate_delete(bgp, p, afi, safi, aggregate); + if (debug) + zlog_debug("%s: %pFX null attribute", __func__, p); + return; + } - if (aspath) - aspath_free(aspath); - if (community) - community_free(&community); - if (ecommunity) - ecommunity_free(&ecommunity); - if (lcommunity) - lcommunity_free(&lcommunity); - + if (pi && CHECK_FLAG(pi->flags, BGP_PATH_VALID) && attrhash_cmp(pi->attr, attr)) { + bgp_attr_unintern(&attr); + bgp_dest_unlock_node(dest); + if (debug) + zlog_debug(" aggregate %pFX: duplicate", p); return; } @@ -8027,24 +8027,10 @@ static void bgp_aggregate_install( if (pi) { bgp_path_info_delete(dest, pi); bgp_process(bgp, dest, pi, afi, safi); + if (debug) + zlog_debug(" aggregate %pFX: existing, removed", p); } - attr = bgp_attr_aggregate_intern( - bgp, origin, aspath, community, ecommunity, lcommunity, - aggregate, atomic_aggregate, p); - - if (!attr) { - aspath_free(aspath); - community_free(&community); - ecommunity_free(&ecommunity); - lcommunity_free(&lcommunity); - bgp_dest_unlock_node(dest); - bgp_aggregate_delete(bgp, p, afi, safi, aggregate); - if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) - zlog_debug("%s: %pFX null attribute", __func__, - p); - return; - } new = info_make(ZEBRA_ROUTE_BGP, BGP_ROUTE_AGGREGATE, 0, bgp->peer_self, attr, dest); @@ -8053,19 +8039,17 @@ static void bgp_aggregate_install( bgp_path_info_add(dest, new); bgp_process(bgp, dest, new, afi, safi); + if (debug) + zlog_debug(" aggregate %pFX: installed", p); } else { uninstall_aggregate_route: - for (pi = orig; pi; pi = pi->next) - if (pi->peer == bgp->peer_self - && pi->type == ZEBRA_ROUTE_BGP - && pi->sub_type == BGP_ROUTE_AGGREGATE) - break; - - /* Withdraw static BGP route from routing table. */ - if (pi) { - bgp_path_info_delete(dest, pi); - bgp_process(bgp, dest, pi, afi, safi); - } + /* Withdraw the aggregate route from routing table. 
*/ + if (pi) { + bgp_path_info_delete(dest, pi); + bgp_process(bgp, dest, pi, afi, safi); + if (debug) + zlog_debug(" aggregate %pFX: uninstall", p); + } } bgp_dest_unlock_node(dest); @@ -8912,6 +8896,27 @@ static int bgp_aggregate_unset(struct vty *vty, const char *prefix_str, return CMD_SUCCESS; } +static bool bgp_aggregate_cmp_params(struct bgp_aggregate *aggregate, const char *rmap, + uint8_t summary_only, uint8_t as_set, uint8_t origin, + bool match_med, const char *suppress_map) +{ + if ((aggregate->origin != origin) || (aggregate->as_set != as_set) || + (aggregate->match_med != match_med) || (aggregate->summary_only != summary_only)) + return false; + + if ((!rmap && aggregate->rmap.name) || (rmap && !aggregate->rmap.name) || + (rmap && aggregate->rmap.name && !strmatch(rmap, aggregate->rmap.name))) + return false; + + if ((!suppress_map && aggregate->suppress_map_name) || + (suppress_map && !aggregate->suppress_map_name) || + (suppress_map && aggregate->suppress_map_name && + !strmatch(suppress_map, aggregate->suppress_map_name))) + return false; + + return true; +} + static int bgp_aggregate_set(struct vty *vty, const char *prefix_str, afi_t afi, safi_t safi, const char *rmap, uint8_t summary_only, uint8_t as_set, @@ -8951,6 +8956,11 @@ static int bgp_aggregate_set(struct vty *vty, const char *prefix_str, afi_t afi, aggregate = bgp_dest_get_bgp_aggregate_info(dest); if (aggregate) { + /* Check for duplicate configs */ + if (bgp_aggregate_cmp_params(aggregate, rmap, summary_only, as_set, origin, + match_med, suppress_map)) + return CMD_SUCCESS; + vty_out(vty, "There is already same aggregate network.\n"); /* try to remove the old entry */ ret = bgp_aggregate_unset(vty, prefix_str, afi, safi); @@ -8986,7 +8996,7 @@ static int bgp_aggregate_set(struct vty *vty, const char *prefix_str, afi_t afi, } aggregate->as_set = as_set_new; - aggregate->safi = safi; + /* Override ORIGIN attribute if defined. * E.g.: Cisco and Juniper set ORIGIN for aggregated address * to IGP which is not what rfc4271 says. 
@@ -9000,7 +9010,6 @@ static int bgp_aggregate_set(struct vty *vty, const char *prefix_str, afi_t afi, aggregate->rmap.name = XSTRDUP(MTYPE_ROUTE_MAP_NAME, rmap); aggregate->rmap.map = route_map_lookup_by_name(rmap); - aggregate->rmap.changed = true; route_map_counter_increment(aggregate->rmap.map); } @@ -9350,8 +9359,8 @@ void bgp_redistribute_add(struct bgp *bgp, struct prefix *p, bgp->peer_self, new_attr, bn); SET_FLAG(new->flags, BGP_PATH_VALID); - bgp_aggregate_increment(bgp, p, new, afi, SAFI_UNICAST); bgp_path_info_add(bn, new); + bgp_aggregate_increment(bgp, p, new, afi, SAFI_UNICAST); bgp_dest_unlock_node(bn); SET_FLAG(bn->flags, BGP_NODE_FIB_INSTALLED); bgp_process(bgp, bn, new, afi, SAFI_UNICAST); @@ -9931,6 +9940,9 @@ void route_vty_out(struct vty *vty, const struct prefix *p, == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) || (path->peer->conf_if)) { json_nexthop_ll = json_object_new_object(); + if (path->peer->conf_if) + json_object_string_add(json_nexthop_ll, "interface", + path->peer->conf_if); json_object_string_addf( json_nexthop_ll, "ip", "%pI6", &attr->mp_nexthop_local); @@ -10945,6 +10957,12 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn, else vty_out(vty, ", (stale)"); } + if (bgp_path_suppressed(path)) { + if (json_paths) + json_object_boolean_true_add(json_path, "suppressed"); + else + vty_out(vty, ", (suppressed)"); + } if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR))) { if (json_paths) { @@ -12042,9 +12060,8 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t sa || type == bgp_show_type_damp_neighbor) { union sockunion *su = output_arg; - if (pi->peer == NULL - || pi->peer->su_remote == NULL - || !sockunion_same(pi->peer->su_remote, su)) + if (pi->peer == NULL || pi->peer->connection->su_remote == NULL || + !sockunion_same(pi->peer->connection->su_remote, su)) continue; } if (type == bgp_show_type_cidr_only) { @@ -15106,6 +15123,8 @@ static int peer_adj_routes(struct vty *vty, struct peer *peer, afi_t afi, json_object *json = NULL; json_object *json_ar = NULL; bool use_json = CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON); + bool first = true; + struct update_subgroup *subgrp; /* Init BGP headers here so they're only displayed once * even if 'table' is 2-tier (MPLS_VPN, ENCAP, EVPN). @@ -15174,6 +15193,28 @@ static int peer_adj_routes(struct vty *vty, struct peer *peer, afi_t afi, else table = bgp->rib[afi][safi]; + subgrp = peer_subgroup(peer, afi, safi); + if (use_json) { + if (type == bgp_show_adj_route_advertised || type == bgp_show_adj_route_received) { + if (header1) { + int version = table ? table->version : 0; + vty_out(vty, "\"bgpTableVersion\":%d", version); + vty_out(vty, ",\"bgpLocalRouterId\":\"%pI4\"", &bgp->router_id); + vty_out(vty, ",\"defaultLocPrf\":%u", bgp->default_local_pref); + vty_out(vty, ",\"localAS\":%u", bgp->as); + if (type == bgp_show_adj_route_advertised && subgrp && + CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE)) + vty_out(vty, ",\"bgpOriginatingDefaultNetwork\":\"%s\"", + (afi == AFI_IP) ? 
"0.0.0.0/0" : "::/0"); + } + + if (type == bgp_show_adj_route_advertised) + vty_out(vty, ",\"advertisedRoutes\": "); + if (type == bgp_show_adj_route_received) + vty_out(vty, ",\"receivedRoutes\": "); + } + } + if ((safi == SAFI_MPLS_VPN) || (safi == SAFI_ENCAP) || (safi == SAFI_EVPN)) { @@ -15192,6 +15233,7 @@ static int peer_adj_routes(struct vty *vty, struct peer *peer, afi_t afi, json_routes = json_object_new_object(); const struct prefix_rd *prd; + prd = (const struct prefix_rd *)bgp_dest_get_prefix( dest); @@ -15205,34 +15247,58 @@ static int peer_adj_routes(struct vty *vty, struct peer *peer, afi_t afi, &filtered_count_per_rd); /* Don't include an empty RD in the output! */ - if (json_routes && (output_count_per_rd > 0)) - json_object_object_add(json_ar, rd_str, - json_routes); + if (json_routes && (output_count_per_rd > 0) && use_json) { + if (type == bgp_show_adj_route_advertised || + type == bgp_show_adj_route_received) { + if (first) { + vty_out(vty, "{\"%s\":", rd_str); + first = false; + } else { + vty_out(vty, ",\"%s\":", rd_str); + } + vty_json_no_pretty(vty, json_routes); + } else { + json_object_object_add(json_ar, rd_str, json_routes); + } + } output_count += output_count_per_rd; filtered_count += filtered_count_per_rd; } - } else + if (first == false && json_routes) + vty_out(vty, "}"); + } else { show_adj_route(vty, peer, table, afi, safi, type, rmap_name, json, json_ar, show_flags, &header1, &header2, rd_str, match, &output_count, &filtered_count); + if (use_json) { + if (type == bgp_show_adj_route_advertised || + type == bgp_show_adj_route_received) { + vty_json_no_pretty(vty, json_ar); + } + } + } + if (use_json) { - if (type == bgp_show_adj_route_advertised) - json_object_object_add(json, "advertisedRoutes", - json_ar); - else + if (type == bgp_show_adj_route_advertised || type == bgp_show_adj_route_received) { + vty_out(vty, ",\"totalPrefixCounter\":%lu", output_count); + vty_out(vty, ",\"filteredPrefixCounter\":%lu", filtered_count); + json_object_free(json); + } else { + /* for bgp_show_adj_route_filtered & bgp_show_adj_route_bestpath type */ json_object_object_add(json, "receivedRoutes", json_ar); - json_object_int_add(json, "totalPrefixCounter", output_count); - json_object_int_add(json, "filteredPrefixCounter", - filtered_count); - - /* - * This is an extremely expensive operation at scale - * and non-pretty reduces memory footprint significantly. - */ - vty_json_no_pretty(vty, json); - } else if (output_count > 0) { + json_object_int_add(json, "totalPrefixCounter", output_count); + json_object_int_add(json, "filteredPrefixCounter", filtered_count); + } + + /* + * This is an extremely expensive operation at scale + * and non-pretty reduces memory footprint significantly. + */ + if ((type != bgp_show_adj_route_advertised) && (type != bgp_show_adj_route_received)) + vty_json_no_pretty(vty, json); + } else if (output_count > 0) { if (!match && filtered_count > 0) vty_out(vty, "\nTotal number of prefixes %ld (%ld filtered)\n", @@ -15335,6 +15401,7 @@ DEFPY(show_ip_bgp_instance_neighbor_advertised_route, uint16_t show_flags = 0; struct listnode *node; struct bgp *abgp; + int ret; if (detail || prefix_str) SET_FLAG(show_flags, BGP_SHOW_OPT_ROUTES_DETAIL); @@ -15376,9 +15443,22 @@ DEFPY(show_ip_bgp_instance_neighbor_advertised_route, else if (argv_find(argv, argc, "filtered-routes", &idx)) type = bgp_show_adj_route_filtered; - if (!all) - return peer_adj_routes(vty, peer, afi, safi, type, route_map, - prefix_str ? 
prefix : NULL, show_flags); + if (!all) { + if (uj) + if (type == bgp_show_adj_route_advertised || + type == bgp_show_adj_route_received) + vty_out(vty, "{\n"); + + ret = peer_adj_routes(vty, peer, afi, safi, type, route_map, + prefix_str ? prefix : NULL, show_flags); + if (uj) + if (type == bgp_show_adj_route_advertised || + type == bgp_show_adj_route_received) + vty_out(vty, "}\n"); + + return ret; + } + if (uj) vty_out(vty, "{\n"); diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h index 474e229575..c4cbbee0c7 100644 --- a/bgpd/bgp_route.h +++ b/bgpd/bgp_route.h @@ -449,7 +449,6 @@ struct bgp_aggregate { struct { char *name; struct route_map *map; - bool changed; } rmap; /* Suppress-count. */ @@ -493,9 +492,6 @@ struct bgp_aggregate { /* Aggregate route's as-path. */ struct aspath *aspath; - /* SAFI configuration. */ - safi_t safi; - /** MED value found in current group. */ uint32_t med_matched_value; @@ -805,6 +801,7 @@ extern void bgp_path_info_delete(struct bgp_dest *dest, struct bgp_path_info *pi); extern struct bgp_path_info_extra * bgp_path_info_extra_get(struct bgp_path_info *path); +extern struct bgp_path_info_extra *bgp_evpn_path_info_extra_get(struct bgp_path_info *path); extern bool bgp_path_info_has_valid_label(const struct bgp_path_info *path); extern void bgp_path_info_set_flag(struct bgp_dest *dest, struct bgp_path_info *path, uint32_t flag); @@ -1013,4 +1010,5 @@ extern void bgp_meta_queue_free(struct meta_queue *mq); extern int early_route_process(struct bgp *bgp, struct bgp_dest *dest); extern int other_route_process(struct bgp *bgp, struct bgp_dest *dest); extern int eoiu_marker_process(struct bgp *bgp, struct bgp_dest *dest); +extern uint32_t bgp_med_value(struct attr *attr, struct bgp *bgp); #endif /* _QUAGGA_BGP_ROUTE_H */ diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c index ce236f87b6..fa8701dc50 100644 --- a/bgpd/bgp_routemap.c +++ b/bgpd/bgp_routemap.c @@ -1303,6 +1303,61 @@ static const struct route_map_rule_cmd route_match_evpn_rd_cmd = { route_match_rd_free }; +/* `match community-limit' */ + +/* Match function should return : + * - RMAP_MATCH if the bgp update community list count + * is less or equal to the configured limit. + * - RMAP_NOMATCH if the community list count is greater than the + * configured limit. + */ +static enum route_map_cmd_result_t +route_match_community_limit(void *rule, const struct prefix *prefix, void *object) +{ + struct bgp_path_info *path = NULL; + struct community *picomm = NULL; + uint16_t count = 0; + uint16_t *limit_rule = rule; + + path = (struct bgp_path_info *)object; + + picomm = bgp_attr_get_community(path->attr); + if (picomm) + count = picomm->size; + + if (count <= *limit_rule) + return RMAP_MATCH; + + return RMAP_NOMATCH; +} + +/* Route map `community-limit' match statement. */ +static void *route_match_community_limit_compile(const char *arg) +{ + uint16_t *limit = NULL; + char *end = NULL; + + limit = XMALLOC(MTYPE_ROUTE_MAP_COMPILED, sizeof(uint16_t)); + *limit = strtoul(arg, &end, 10); + if (*end != '\0') { + XFREE(MTYPE_ROUTE_MAP_COMPILED, limit); + return NULL; + } + return limit; +} + +/* Free route map's compiled `community-limit' value. */ +static void route_match_community_limit_free(void *rule) +{ + XFREE(MTYPE_ROUTE_MAP_COMPILED, rule); +} + +/* Route map commands for community limit matching. 
*/ +static const struct route_map_rule_cmd route_match_community_limit_cmd = { + "community-limit", route_match_community_limit, + route_match_community_limit_compile, route_match_community_limit_free +}; + static enum route_map_cmd_result_t route_set_evpn_gateway_ip(void *rule, const struct prefix *prefix, void *object) { @@ -2066,10 +2121,9 @@ route_set_ip_nexthop(void *rule, const struct prefix *prefix, void *object) BATTR_RMAP_NEXTHOP_UNCHANGED); } else if (rins->peer_address) { if ((CHECK_FLAG(peer->rmap_type, PEER_RMAP_TYPE_IN)) && - peer->su_remote && - sockunion_family(peer->su_remote) == AF_INET) { - path->attr->nexthop.s_addr = - sockunion2ip(peer->su_remote); + peer->connection->su_remote && + sockunion_family(peer->connection->su_remote) == AF_INET) { + path->attr->nexthop.s_addr = sockunion2ip(peer->connection->su_remote); SET_FLAG(path->attr->flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)); } else if (CHECK_FLAG(peer->rmap_type, PEER_RMAP_TYPE_OUT)) { /* The next hop value will be set as part of @@ -4143,9 +4197,9 @@ route_set_ipv6_nexthop_peer(void *rule, const struct prefix *pfx, void *object) path = object; peer = path->peer; - if ((CHECK_FLAG(peer->rmap_type, PEER_RMAP_TYPE_IN)) && - peer->su_remote && sockunion_family(peer->su_remote) == AF_INET6) { - peer_address = peer->su_remote->sin6.sin6_addr; + if ((CHECK_FLAG(peer->rmap_type, PEER_RMAP_TYPE_IN)) && peer->connection->su_remote && + sockunion_family(peer->connection->su_remote) == AF_INET6) { + peer_address = peer->connection->su_remote->sin6.sin6_addr; /* Set next hop value and length in attribute. */ if (IN6_IS_ADDR_LINKLOCAL(&peer_address)) { path->attr->mp_nexthop_local = peer_address; @@ -4684,7 +4738,6 @@ static void bgp_route_map_process_update(struct bgp *bgp, const char *rmap_name, route_map_counter_increment(map); aggregate->rmap.map = map; - aggregate->rmap.changed = true; matched = true; } @@ -5709,6 +5762,25 @@ DEFPY_YANG( return nb_cli_apply_changes(vty, NULL); } +DEFPY_YANG( + match_community_limit, match_community_limit_cmd, + "[no$no] match community-limit ![(0-65535)$limit]", + NO_STR + MATCH_STR + "Match BGP community limit\n" + "Community limit number\n") +{ + const char *xpath = "./match-condition[condition='frr-bgp-route-map:match-community-limit']"; + char xpath_value[XPATH_MAXLEN]; + + nb_cli_enqueue_change(vty, xpath, no ? NB_OP_DESTROY : NB_OP_CREATE, NULL); + snprintf(xpath_value, sizeof(xpath_value), + "%s/rmap-match-condition/frr-bgp-route-map:community-limit", xpath); + + nb_cli_enqueue_change(vty, xpath_value, no ? 
NB_OP_DESTROY : NB_OP_MODIFY, limit_str); + return nb_cli_apply_changes(vty, NULL); +} + DEFUN_YANG( no_match_community, no_match_community_cmd, "no match community [<(1-99)|(100-500)|COMMUNITY_LIST_NAME> [<exact-match$exact|any$any>]]", @@ -7907,6 +7979,7 @@ void bgp_route_map_init(void) route_map_install_match(&route_match_evpn_vni_cmd); route_map_install_match(&route_match_evpn_route_type_cmd); route_map_install_match(&route_match_evpn_rd_cmd); + route_map_install_match(&route_match_community_limit_cmd); route_map_install_match(&route_match_evpn_default_route_cmd); route_map_install_match(&route_match_vrl_source_vrf_cmd); @@ -7979,6 +8052,7 @@ void bgp_route_map_init(void) install_element(RMAP_NODE, &no_match_alias_cmd); install_element(RMAP_NODE, &match_community_cmd); install_element(RMAP_NODE, &no_match_community_cmd); + install_element(RMAP_NODE, &match_community_limit_cmd); install_element(RMAP_NODE, &match_lcommunity_cmd); install_element(RMAP_NODE, &no_match_lcommunity_cmd); install_element(RMAP_NODE, &match_ecommunity_cmd); diff --git a/bgpd/bgp_routemap_nb.c b/bgpd/bgp_routemap_nb.c index d8fdb4fbc4..4645593441 100644 --- a/bgpd/bgp_routemap_nb.c +++ b/bgpd/bgp_routemap_nb.c @@ -166,6 +166,13 @@ const struct frr_yang_module_info frr_bgp_route_map_info = { } }, { + .xpath = "/frr-route-map:lib/route-map/entry/match-condition/rmap-match-condition/frr-bgp-route-map:community-limit", + .cbs = { + .modify = lib_route_map_entry_match_condition_rmap_match_condition_community_limit_modify, + .destroy = lib_route_map_entry_match_condition_rmap_match_condition_community_limit_destroy, + } + }, + { .xpath = "/frr-route-map:lib/route-map/entry/match-condition/rmap-match-condition/frr-bgp-route-map:comm-list", .cbs = { .create = lib_route_map_entry_match_condition_rmap_match_condition_comm_list_create, diff --git a/bgpd/bgp_routemap_nb.h b/bgpd/bgp_routemap_nb.h index f59686f386..45689242a0 100644 --- a/bgpd/bgp_routemap_nb.h +++ b/bgpd/bgp_routemap_nb.h @@ -72,6 +72,10 @@ int lib_route_map_entry_match_condition_rmap_match_condition_evpn_route_type_mod int lib_route_map_entry_match_condition_rmap_match_condition_evpn_route_type_destroy(struct nb_cb_destroy_args *args); int lib_route_map_entry_match_condition_rmap_match_condition_route_distinguisher_modify(struct nb_cb_modify_args *args); int lib_route_map_entry_match_condition_rmap_match_condition_route_distinguisher_destroy(struct nb_cb_destroy_args *args); +int lib_route_map_entry_match_condition_rmap_match_condition_community_limit_modify( + struct nb_cb_modify_args *args); +int lib_route_map_entry_match_condition_rmap_match_condition_community_limit_destroy( + struct nb_cb_destroy_args *args); int lib_route_map_entry_match_condition_rmap_match_condition_comm_list_create( struct nb_cb_create_args *args); int lib_route_map_entry_match_condition_rmap_match_condition_comm_list_destroy( diff --git a/bgpd/bgp_routemap_nb_config.c b/bgpd/bgp_routemap_nb_config.c index 0dca196ed6..223c416dc5 100644 --- a/bgpd/bgp_routemap_nb_config.c +++ b/bgpd/bgp_routemap_nb_config.c @@ -1275,6 +1275,57 @@ lib_route_map_entry_match_condition_rmap_match_condition_route_distinguisher_des } /* + * XPath: /frr-route-map:lib/route-map/entry/match-condition/rmap-match-condition/frr-bgp-route-map:community-limit + */ +int lib_route_map_entry_match_condition_rmap_match_condition_community_limit_modify( + struct nb_cb_modify_args *args) +{ + struct routemap_hook_context *rhc; + const char *limit; + enum rmap_compile_rets ret; + + switch (args->event) { + case 
NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + /* Add configuration. */ + rhc = nb_running_get_entry(args->dnode, NULL, true); + limit = yang_dnode_get_string(args->dnode, NULL); + + rhc->rhc_mhook = bgp_route_match_delete; + rhc->rhc_rule = "community-limit"; + rhc->rhc_event = RMAP_EVENT_MATCH_DELETED; + + ret = bgp_route_match_add(rhc->rhc_rmi, "community-limit", limit, + RMAP_EVENT_MATCH_ADDED, args->errmsg, args->errmsg_len); + + if (ret != RMAP_COMPILE_SUCCESS) { + rhc->rhc_mhook = NULL; + return NB_ERR_INCONSISTENCY; + } + } + + return NB_OK; +} + +int lib_route_map_entry_match_condition_rmap_match_condition_community_limit_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return lib_route_map_entry_match_destroy(args); + } + + return NB_OK; +} + +/* * XPath = /frr-route-map:lib/route-map/entry/match-condition/rmap-match-condition/frr-bgp-route-map:comm-list */ int lib_route_map_entry_match_condition_rmap_match_condition_comm_list_create( diff --git a/bgpd/bgp_script.c b/bgpd/bgp_script.c index b37385812e..4874813bed 100644 --- a/bgpd/bgp_script.c +++ b/bgpd/bgp_script.c @@ -37,9 +37,9 @@ void lua_pushpeer(lua_State *L, const struct peer *peer) lua_setfield(L, -2, "last_readtime"); lua_pushinteger(L, peer->resettime); lua_setfield(L, -2, "last_resettime"); - lua_pushsockunion(L, peer->su_local); + lua_pushsockunion(L, peer->connection->su_local); lua_setfield(L, -2, "local_address"); - lua_pushsockunion(L, peer->su_remote); + lua_pushsockunion(L, peer->connection->su_remote); lua_setfield(L, -2, "remote_address"); lua_pushinteger(L, peer->cap); lua_setfield(L, -2, "capabilities"); diff --git a/bgpd/bgp_snmp_bgp4.c b/bgpd/bgp_snmp_bgp4.c index 755777c167..32430f42a8 100644 --- a/bgpd/bgp_snmp_bgp4.c +++ b/bgpd/bgp_snmp_bgp4.c @@ -266,25 +266,23 @@ static uint8_t *bgpPeerTable(struct variable *v, oid name[], size_t *length, case BGPPEERNEGOTIATEDVERSION: return SNMP_INTEGER(BGP_VERSION_4); case BGPPEERLOCALADDR: - if (peer->su_local) - return SNMP_IPADDRESS(peer->su_local->sin.sin_addr); + if (peer->connection->su_local) + return SNMP_IPADDRESS(peer->connection->su_local->sin.sin_addr); else return SNMP_IPADDRESS(bgp_empty_addr); case BGPPEERLOCALPORT: - if (peer->su_local) - return SNMP_INTEGER( - ntohs(peer->su_local->sin.sin_port)); + if (peer->connection->su_local) + return SNMP_INTEGER(ntohs(peer->connection->su_local->sin.sin_port)); else return SNMP_INTEGER(0); case BGPPEERREMOTEADDR: - if (peer->su_remote) - return SNMP_IPADDRESS(peer->su_remote->sin.sin_addr); + if (peer->connection->su_remote) + return SNMP_IPADDRESS(peer->connection->su_remote->sin.sin_addr); else return SNMP_IPADDRESS(bgp_empty_addr); case BGPPEERREMOTEPORT: - if (peer->su_remote) - return SNMP_INTEGER( - ntohs(peer->su_remote->sin.sin_port)); + if (peer->connection->su_remote) + return SNMP_INTEGER(ntohs(peer->connection->su_remote->sin.sin_port)); else return SNMP_INTEGER(0); case BGPPEERREMOTEAS: diff --git a/bgpd/bgp_snmp_bgp4v2.c b/bgpd/bgp_snmp_bgp4v2.c index 5f36e29876..724eefe601 100644 --- a/bgpd/bgp_snmp_bgp4v2.c +++ b/bgpd/bgp_snmp_bgp4v2.c @@ -208,49 +208,42 @@ static uint8_t *bgpv2PeerTable(struct variable *v, oid name[], size_t *length, case BGP4V2_PEER_INSTANCE: return SNMP_INTEGER(peer->bgp->vrf_id); case BGP4V2_PEER_LOCAL_ADDR_TYPE: - if (peer->su_local) - return SNMP_INTEGER(peer->su_local->sa.sa_family == - AF_INET + if 
(peer->connection->su_local) + return SNMP_INTEGER(peer->connection->su_local->sa.sa_family == AF_INET ? AFI_IP : AFI_IP6); else return SNMP_INTEGER(0); case BGP4V2_PEER_LOCAL_ADDR: - if (peer->su_local) - if (peer->su_local->sa.sa_family == AF_INET) - return SNMP_IPADDRESS( - peer->su_local->sin.sin_addr); + if (peer->connection->su_local) + if (peer->connection->su_local->sa.sa_family == AF_INET) + return SNMP_IPADDRESS(peer->connection->su_local->sin.sin_addr); else - return SNMP_IP6ADDRESS( - peer->su_local->sin6.sin6_addr); + return SNMP_IP6ADDRESS(peer->connection->su_local->sin6.sin6_addr); else return SNMP_IPADDRESS(bgp_empty_addr); case BGP4V2_PEER_REMOTE_ADDR_TYPE: - if (peer->su_remote) - return SNMP_INTEGER(peer->su_remote->sa.sa_family == - AF_INET + if (peer->connection->su_remote) + return SNMP_INTEGER(peer->connection->su_remote->sa.sa_family == AF_INET ? AFI_IP : AFI_IP6); else return SNMP_INTEGER(0); case BGP4V2_PEER_REMOTE_ADDR: - if (peer->su_remote) - if (peer->su_remote->sa.sa_family == AF_INET) - return SNMP_IPADDRESS( - peer->su_remote->sin.sin_addr); + if (peer->connection->su_remote) + if (peer->connection->su_remote->sa.sa_family == AF_INET) + return SNMP_IPADDRESS(peer->connection->su_remote->sin.sin_addr); else - return SNMP_IP6ADDRESS( - peer->su_remote->sin6.sin6_addr); + return SNMP_IP6ADDRESS(peer->connection->su_remote->sin6.sin6_addr); else return SNMP_IPADDRESS(bgp_empty_addr); case BGP4V2_PEER_LOCAL_PORT: - if (peer->su_local) - if (peer->su_local->sa.sa_family == AF_INET) - return SNMP_INTEGER( - ntohs(peer->su_local->sin.sin_port)); + if (peer->connection->su_local) + if (peer->connection->su_local->sa.sa_family == AF_INET) + return SNMP_INTEGER(ntohs(peer->connection->su_local->sin.sin_port)); else return SNMP_INTEGER( - ntohs(peer->su_local->sin6.sin6_port)); + ntohs(peer->connection->su_local->sin6.sin6_port)); else return SNMP_INTEGER(0); case BGP4V2_PEER_LOCAL_AS: @@ -258,13 +251,13 @@ static uint8_t *bgpv2PeerTable(struct variable *v, oid name[], size_t *length, case BGP4V2_PEER_LOCAL_IDENTIFIER: return SNMP_IPADDRESS(peer->local_id); case BGP4V2_PEER_REMOTE_PORT: - if (peer->su_remote) - if (peer->su_remote->sa.sa_family == AF_INET) + if (peer->connection->su_remote) + if (peer->connection->su_remote->sa.sa_family == AF_INET) return SNMP_INTEGER( - ntohs(peer->su_remote->sin.sin_port)); + ntohs(peer->connection->su_remote->sin.sin_port)); else return SNMP_INTEGER( - ntohs(peer->su_remote->sin6.sin6_port)); + ntohs(peer->connection->su_remote->sin6.sin6_port)); else return SNMP_INTEGER(0); case BGP4V2_PEER_REMOTE_AS: diff --git a/bgpd/bgp_trace.h b/bgpd/bgp_trace.h index a77a25e435..ce86920634 100644 --- a/bgpd/bgp_trace.h +++ b/bgpd/bgp_trace.h @@ -135,12 +135,13 @@ TRACEPOINT_LOGLEVEL(frr_bgp, bmp_mirror_packet, TRACE_INFO) TRACEPOINT_EVENT( frr_bgp, bmp_eor, - TP_ARGS(afi_t, afi, safi_t, safi, uint8_t, flags, uint8_t, peer_type_flag), + TP_ARGS(afi_t, afi, safi_t, safi, uint8_t, flags, uint8_t, peer_type_flag, bgp), TP_FIELDS( ctf_integer(afi_t, afi, afi) ctf_integer(safi_t, safi, safi) ctf_integer(uint8_t, flags, flags) ctf_integer(uint8_t, peer_type_flag, peer_type_flag) + ctf_string(bgp, bgp->name_pretty) ) ) @@ -211,6 +212,24 @@ TRACEPOINT_EVENT( TRACEPOINT_LOGLEVEL(frr_bgp, bmp_process, TRACE_DEBUG) /* + * BMP is hooked for a nexthop tracking event + */ +TRACEPOINT_EVENT( + frr_bgp, + bmp_nht_path_valid, + TP_ARGS(struct bgp *, bgp, char *, pfx, struct bgp_path_info *, + path, bool, valid), + TP_FIELDS( + ctf_string(bgp, 
bgp->name_pretty) + ctf_string(prefix, pfx) + ctf_string(path, PEER_HOSTNAME(path->peer)) + ctf_integer(bool, valid, valid) + ) +) + +TRACEPOINT_LOGLEVEL(frr_bgp, bmp_nht_path_valid, TRACE_DEBUG) + +/* * bgp_dest_lock/bgp_dest_unlock */ TRACEPOINT_EVENT( diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c index 35ddfc34ff..c6b1ff1d2f 100644 --- a/bgpd/bgp_updgrp.c +++ b/bgpd/bgp_updgrp.c @@ -2037,13 +2037,16 @@ int update_group_adjust_soloness(struct peer *peer, int set) struct peer_group *group; struct listnode *node, *nnode; - peer_flag_set(peer, PEER_FLAG_LONESOUL); - if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { peer_lonesoul_or_not(peer, set); if (peer_established(peer->connection)) bgp_announce_route_all(peer); } else { + if (set) + peer_flag_set(peer, PEER_FLAG_LONESOUL); + else + peer_flag_unset(peer, PEER_FLAG_LONESOUL); + group = peer->group; for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) { peer_lonesoul_or_not(peer, set); diff --git a/bgpd/bgp_updgrp.h b/bgpd/bgp_updgrp.h index d0fd226d99..6549c99e8f 100644 --- a/bgpd/bgp_updgrp.h +++ b/bgpd/bgp_updgrp.h @@ -66,7 +66,6 @@ typedef struct { #define BPKT_ATTRVEC_FLAGS_UPDATED (1 << 0) #define BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS (1 << 1) -#define BPKT_ATTRVEC_FLAGS_REFLECTED (1 << 2) #define BPKT_ATTRVEC_FLAGS_RMAP_NH_UNCHANGED (1 << 3) #define BPKT_ATTRVEC_FLAGS_RMAP_IPV4_NH_CHANGED (1 << 4) #define BPKT_ATTRVEC_FLAGS_RMAP_IPV6_GNH_CHANGED (1 << 5) diff --git a/bgpd/bgp_updgrp_adv.c b/bgpd/bgp_updgrp_adv.c index a1bf9a4c61..8f816eb30d 100644 --- a/bgpd/bgp_updgrp_adv.c +++ b/bgpd/bgp_updgrp_adv.c @@ -228,64 +228,67 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg) afi2str(afi), safi2str(safi), ctx->dest); UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { - /* withdraw stale addpath without waiting for the coalesce timer timeout. - * Otherwise, since adj->addpath_tx_id is overwritten, the code never - * notice anymore it has to do a withdrawal. - */ - if (addpath_capable) - subgrp_withdraw_stale_addpath(ctx, subgrp); - /* - * Skip the subgroups that have coalesce timer running. We will - * walk the entire prefix table for those subgroups when the - * coalesce timer fires. - */ - if (!subgrp->t_coalesce) { - - /* An update-group that uses addpath */ - if (addpath_capable) { - subgrp_announce_addpath_best_selected(ctx->dest, - subgrp); + /* An update-group that uses addpath */ + if (addpath_capable) { + /* Send withdrawals without waiting for coalesting timer + * to expire. 
+ */ + if (subgrp->t_coalesce) { + subgrp_withdraw_stale_addpath(ctx, subgrp); - /* Process the bestpath last so the "show [ip] - * bgp neighbor x.x.x.x advertised" - * output shows the attributes from the bestpath - */ - if (ctx->pi) - subgroup_process_announce_selected( - subgrp, ctx->pi, ctx->dest, afi, - safi, - bgp_addpath_id_for_peer( - peer, afi, safi, - &ctx->pi->tx_addpath)); + goto done; } - /* An update-group that does not use addpath */ - else { - if (ctx->pi) { - subgroup_process_announce_selected( - subgrp, ctx->pi, ctx->dest, afi, - safi, - bgp_addpath_id_for_peer( - peer, afi, safi, - &ctx->pi->tx_addpath)); - } else { - /* Find the addpath_tx_id of the path we - * had advertised and - * send a withdraw */ - RB_FOREACH_SAFE (adj, bgp_adj_out_rb, - &ctx->dest->adj_out, + + subgrp_withdraw_stale_addpath(ctx, subgrp); + subgrp_announce_addpath_best_selected(ctx->dest, subgrp); + + /* Process the bestpath last so the + * "show [ip] bgp neighbor x.x.x.x advertised" output shows + * the attributes from the bestpath. + */ + if (ctx->pi) + subgroup_process_announce_selected( + subgrp, ctx->pi, ctx->dest, afi, safi, + bgp_addpath_id_for_peer(peer, afi, safi, + &ctx->pi->tx_addpath)); + } else { + /* Send withdrawals without waiting for coalesting timer + * to expire. + */ + if (subgrp->t_coalesce) { + if (!ctx->pi || CHECK_FLAG(ctx->pi->flags, BGP_PATH_UNUSEABLE)) { + RB_FOREACH_SAFE (adj, bgp_adj_out_rb, &ctx->dest->adj_out, adj_next) { if (adj->subgroup == subgrp) { subgroup_process_announce_selected( - subgrp, NULL, - ctx->dest, afi, - safi, + subgrp, NULL, ctx->dest, afi, safi, adj->addpath_tx_id); } } } + + goto done; + } + + if (ctx->pi) { + subgroup_process_announce_selected( + subgrp, ctx->pi, ctx->dest, afi, safi, + bgp_addpath_id_for_peer(peer, afi, safi, + &ctx->pi->tx_addpath)); + } else { + RB_FOREACH_SAFE (adj, bgp_adj_out_rb, &ctx->dest->adj_out, + adj_next) { + if (adj->subgroup == subgrp) { + subgroup_process_announce_selected(subgrp, NULL, + ctx->dest, afi, + safi, + adj->addpath_tx_id); + } + } } } +done: /* Notify BGP Conditional advertisement */ bgp_notify_conditional_adv_scanner(subgrp); } diff --git a/bgpd/bgp_updgrp_packet.c b/bgpd/bgp_updgrp_packet.c index 3ce136ef87..ec418f2b1d 100644 --- a/bgpd/bgp_updgrp_packet.c +++ b/bgpd/bgp_updgrp_packet.c @@ -1284,10 +1284,6 @@ bpacket_vec_arr_inherit_attr_flags(struct bpacket_attr_vec_arr *vecarr, SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags, BPKT_ATTRVEC_FLAGS_RMAP_NH_PEER_ADDRESS); - if (CHECK_FLAG(attr->rmap_change_flags, BATTR_REFLECTED)) - SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags, - BPKT_ATTRVEC_FLAGS_REFLECTED); - if (CHECK_FLAG(attr->rmap_change_flags, BATTR_RMAP_NEXTHOP_UNCHANGED)) SET_FLAG(vecarr->entries[BGP_ATTR_VEC_NH].flags, BPKT_ATTRVEC_FLAGS_RMAP_NH_UNCHANGED); diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index 550adf93db..6290e1e5b1 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -93,18 +93,6 @@ FRR_CFG_DEFAULT_BOOL(BGP_DETERMINISTIC_MED, { .val_bool = true, .match_profile = "datacenter", }, { .val_bool = false }, ); -FRR_CFG_DEFAULT_ULONG(BGP_CONNECT_RETRY, - { .val_ulong = 10, .match_profile = "datacenter", }, - { .val_ulong = BGP_DEFAULT_CONNECT_RETRY }, -); -FRR_CFG_DEFAULT_ULONG(BGP_HOLDTIME, - { .val_ulong = 9, .match_profile = "datacenter", }, - { .val_ulong = BGP_DEFAULT_KEEPALIVE }, -); -FRR_CFG_DEFAULT_ULONG(BGP_KEEPALIVE, - { .val_ulong = 3, .match_profile = "datacenter", }, - { .val_ulong = BGP_DEFAULT_KEEPALIVE }, -); FRR_CFG_DEFAULT_BOOL(BGP_EBGP_REQUIRES_POLICY, { 
.val_bool = false, .match_profile = "datacenter", }, { .val_bool = false, .match_version = "< 7.4", }, @@ -134,6 +122,12 @@ FRR_CFG_DEFAULT_BOOL(BGP_ENFORCE_FIRST_AS, { .val_bool = false, .match_version = "< 9.1", }, { .val_bool = true }, ); +FRR_CFG_DEFAULT_BOOL(BGP_RR_ALLOW_OUTBOUND_POLICY, + { .val_bool = false }, +); +FRR_CFG_DEFAULT_BOOL(BGP_COMPARE_AIGP, + { .val_bool = false }, +); DEFINE_HOOK(bgp_inst_config_write, (struct bgp *bgp, struct vty *vty), @@ -141,6 +135,8 @@ DEFINE_HOOK(bgp_inst_config_write, DEFINE_HOOK(bgp_snmp_update_last_changed, (struct bgp *bgp), (bgp)); DEFINE_HOOK(bgp_snmp_init_stats, (struct bgp *bgp), (bgp)); DEFINE_HOOK(bgp_snmp_traps_config_write, (struct vty * vty), (vty)); +DEFINE_HOOK(bgp_route_distinguisher_update, (struct bgp *bgp, afi_t afi, bool preconfig), + (bgp, afi, preconfig)); static struct peer_group *listen_range_exists(struct bgp *bgp, struct prefix *range, int exact); @@ -632,6 +628,10 @@ int bgp_get_vty(struct bgp **bgp, as_t *as, const char *name, BGP_FLAG_DYNAMIC_CAPABILITY); if (DFLT_BGP_ENFORCE_FIRST_AS) SET_FLAG((*bgp)->flags, BGP_FLAG_ENFORCE_FIRST_AS); + if (DFLT_BGP_RR_ALLOW_OUTBOUND_POLICY) + SET_FLAG((*bgp)->flags, BGP_FLAG_RR_ALLOW_OUTBOUND_POLICY); + if (DFLT_BGP_COMPARE_AIGP) + SET_FLAG((*bgp)->flags, BGP_FLAG_COMPARE_AIGP); ret = BGP_SUCCESS; } @@ -1509,13 +1509,12 @@ DEFUN_NOSH (router_bgp, int idx_asn = 2; int idx_view_vrf = 3; int idx_vrf = 4; - int is_new_bgp = 0; int idx_asnotation = 3; int idx_asnotation_kind = 4; enum asnotation_mode asnotation = ASNOTATION_UNDEFINED; int ret; as_t as; - struct bgp *bgp; + struct bgp *bgp = NULL; const char *name = NULL; enum bgp_instance_type inst_type; @@ -1577,35 +1576,40 @@ DEFUN_NOSH (router_bgp, asnotation = ASNOTATION_PLAIN; } - if (inst_type == BGP_INSTANCE_TYPE_DEFAULT) - is_new_bgp = (bgp_lookup(as, name) == NULL); - - ret = bgp_get_vty(&bgp, &as, name, inst_type, - argv[idx_asn]->arg, asnotation); + ret = bgp_lookup_by_as_name_type(&bgp, &as, argv[idx_asn]->arg, asnotation, name, + inst_type, true); + if (bgp && ret == BGP_INSTANCE_EXISTS) + ret = CMD_SUCCESS; + else if (bgp == NULL && ret == CMD_SUCCESS) + /* SUCCESS and bgp is NULL */ + ret = bgp_get_vty(&bgp, &as, name, inst_type, argv[idx_asn]->arg, + asnotation); switch (ret) { case BGP_ERR_AS_MISMATCH: vty_out(vty, "BGP is already running; AS is %s\n", - bgp->as_pretty); + bgp ? bgp->as_pretty : "unknown"); return CMD_WARNING_CONFIG_FAILED; case BGP_ERR_INSTANCE_MISMATCH: vty_out(vty, "BGP instance name and AS number mismatch\n"); - vty_out(vty, - "BGP instance is already running; AS is %s\n", - bgp->as_pretty); + vty_out(vty, "BGP instance is already running; AS is %s\n", + bgp ? bgp->as_pretty : "unknown"); return CMD_WARNING_CONFIG_FAILED; } + if (!bgp) { + vty_out(vty, "BGP instance not found\n"); + return CMD_WARNING_CONFIG_FAILED; + } /* * If we just instantiated the default instance, complete * any pending VRF-VPN leaking that was configured via * earlier "router bgp X vrf FOO" blocks. 
*/ - if (is_new_bgp && inst_type == BGP_INSTANCE_TYPE_DEFAULT) + if (inst_type == BGP_INSTANCE_TYPE_DEFAULT) vpn_leak_postchange_all(); - if (inst_type == BGP_INSTANCE_TYPE_VRF || - IS_BGP_INSTANCE_HIDDEN(bgp)) { + if (inst_type == BGP_INSTANCE_TYPE_VRF || IS_BGP_INSTANCE_HIDDEN(bgp)) { bgp_vpn_leak_export(bgp); UNSET_FLAG(bgp->flags, BGP_FLAG_INSTANCE_HIDDEN); UNSET_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS); @@ -5987,13 +5991,17 @@ DEFUN (neighbor_capability_enhe, { int idx_peer = 1; struct peer *peer; + int ret; peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg); if (peer && peer->conf_if) return CMD_SUCCESS; - return peer_flag_set_vty(vty, argv[idx_peer]->arg, - PEER_FLAG_CAPABILITY_ENHE); + ret = peer_flag_set_vty(vty, argv[idx_peer]->arg, PEER_FLAG_CAPABILITY_ENHE); + + bgp_capability_send(peer, AFI_IP, SAFI_UNICAST, CAPABILITY_CODE_ENHE, CAPABILITY_ACTION_SET); + + return ret; } DEFUN (no_neighbor_capability_enhe, @@ -6007,6 +6015,7 @@ DEFUN (no_neighbor_capability_enhe, { int idx_peer = 2; struct peer *peer; + int ret; peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg); if (peer && peer->conf_if) { @@ -6016,8 +6025,12 @@ DEFUN (no_neighbor_capability_enhe, return CMD_WARNING_CONFIG_FAILED; } - return peer_flag_unset_vty(vty, argv[idx_peer]->arg, - PEER_FLAG_CAPABILITY_ENHE); + ret = peer_flag_unset_vty(vty, argv[idx_peer]->arg, PEER_FLAG_CAPABILITY_ENHE); + + bgp_capability_send(peer, AFI_IP, SAFI_UNICAST, CAPABILITY_CODE_ENHE, + CAPABILITY_ACTION_UNSET); + + return ret; } /* neighbor capability software-version */ @@ -9203,21 +9216,12 @@ DEFUN(neighbor_disable_addpath_rx, struct peer *peer; afi_t afi = bgp_node_afi(vty); safi_t safi = bgp_node_safi(vty); - int ret; - int action; peer = peer_and_group_lookup_vty(vty, peer_str); if (!peer) return CMD_WARNING_CONFIG_FAILED; - action = bgp_addpath_capability_action(peer->addpath_type[afi][safi], 0); - - ret = peer_af_flag_set_vty(vty, peer_str, afi, safi, - PEER_FLAG_DISABLE_ADDPATH_RX); - - bgp_capability_send(peer, afi, safi, CAPABILITY_CODE_ADDPATH, action); - - return ret; + return peer_af_flag_set_vty(vty, peer_str, afi, safi, PEER_FLAG_DISABLE_ADDPATH_RX); } DEFUN(no_neighbor_disable_addpath_rx, @@ -9232,21 +9236,12 @@ DEFUN(no_neighbor_disable_addpath_rx, struct peer *peer; afi_t afi = bgp_node_afi(vty); safi_t safi = bgp_node_safi(vty); - int ret; - int action; peer = peer_and_group_lookup_vty(vty, peer_str); if (!peer) return CMD_WARNING_CONFIG_FAILED; - action = bgp_addpath_capability_action(peer->addpath_type[afi][safi], 0); - - ret = peer_af_flag_unset_vty(vty, peer_str, afi, safi, - PEER_FLAG_DISABLE_ADDPATH_RX); - - bgp_capability_send(peer, afi, safi, CAPABILITY_CODE_ADDPATH, action); - - return ret; + return peer_af_flag_unset_vty(vty, peer_str, afi, safi, PEER_FLAG_DISABLE_ADDPATH_RX); } DEFUN (neighbor_addpath_tx_all_paths, @@ -9258,15 +9253,12 @@ DEFUN (neighbor_addpath_tx_all_paths, { int idx_peer = 1; struct peer *peer; - afi_t afi = bgp_node_afi(vty); - safi_t safi = bgp_node_safi(vty); peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg); if (!peer) return CMD_WARNING_CONFIG_FAILED; - bgp_addpath_set_peer_type(peer, afi, safi, BGP_ADDPATH_ALL, 0); - + bgp_addpath_set_peer_type(peer, bgp_node_afi(vty), bgp_node_safi(vty), BGP_ADDPATH_ALL, 0); return CMD_SUCCESS; } @@ -9286,20 +9278,18 @@ DEFUN (no_neighbor_addpath_tx_all_paths, { int idx_peer = 2; struct peer *peer; - afi_t afi = bgp_node_afi(vty); - safi_t safi = bgp_node_safi(vty); peer = peer_and_group_lookup_vty(vty, 
argv[idx_peer]->arg); if (!peer) return CMD_WARNING_CONFIG_FAILED; - if (peer->addpath_type[afi][safi] != BGP_ADDPATH_ALL) { + if (peer->addpath_type[bgp_node_afi(vty)][bgp_node_safi(vty)] != BGP_ADDPATH_ALL) { vty_out(vty, "%% Peer not currently configured to transmit all paths."); return CMD_WARNING_CONFIG_FAILED; } - bgp_addpath_set_peer_type(peer, afi, safi, BGP_ADDPATH_NONE, 0); + bgp_addpath_set_peer_type(peer, bgp_node_afi(vty), bgp_node_safi(vty), BGP_ADDPATH_NONE, 0); return CMD_SUCCESS; } @@ -9805,6 +9795,14 @@ DEFPY (af_rd_vpn_export, vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi, bgp_get_default(), bgp); + if (!bgp->vpn_policy[afi].tovpn_rd_pretty && !rd_str) + return CMD_SUCCESS; + + if (yes && bgp->vpn_policy[afi].tovpn_rd_pretty && rd_str && + strmatch(rd_str, bgp->vpn_policy[afi].tovpn_rd_pretty)) + return CMD_SUCCESS; + + hook_call(bgp_route_distinguisher_update, bgp, afi, true); if (yes) { if (bgp->vpn_policy[afi].tovpn_rd_pretty) XFREE(MTYPE_BGP_NAME, bgp->vpn_policy[afi].tovpn_rd_pretty); @@ -9815,9 +9813,11 @@ DEFPY (af_rd_vpn_export, BGP_VPN_POLICY_TOVPN_RD_SET); } else { XFREE(MTYPE_BGP_NAME, bgp->vpn_policy[afi].tovpn_rd_pretty); + bgp->vpn_policy[afi].tovpn_rd_pretty = NULL; UNSET_FLAG(bgp->vpn_policy[afi].flags, BGP_VPN_POLICY_TOVPN_RD_SET); } + hook_call(bgp_route_distinguisher_update, bgp, afi, false); /* post-change: re-export vpn routes */ vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi, @@ -10550,7 +10550,7 @@ DEFPY(bgp_imexport_vrf, bgp_imexport_vrf_cmd, SET_FLAG(bgp_default->flags, BGP_FLAG_INSTANCE_HIDDEN); } - vrf_bgp = bgp_lookup_by_name(import_name); + vrf_bgp = bgp_lookup_by_name_filter(import_name, false); if (!vrf_bgp) { if (strcmp(import_name, VRF_DEFAULT_NAME) == 0) { vrf_bgp = bgp_default; @@ -11476,6 +11476,72 @@ DEFPY (show_bgp_vrfs, return CMD_SUCCESS; } +DEFPY(show_bgp_router, + show_bgp_router_cmd, + "show bgp router [json]", + SHOW_STR + BGP_STR + "Overall BGP information\n" + JSON_STR) +{ + char timebuf[MONOTIME_STRLEN]; + time_t unix_timestamp; + bool uj = use_json(argc, argv); + json_object *json = NULL; + + if (uj) + json = json_object_new_object(); + + time_to_string(bm->start_time, timebuf); + + if (uj) { + unix_timestamp = time(NULL) - (monotime(NULL) - bm->start_time); + json_object_int_add(json, "bgpStartedAt", unix_timestamp); + json_object_boolean_add(json, "bgpStartedGracefully", + CHECK_FLAG(bm->flags, BM_FLAG_GRACEFUL_RESTART)); + } + + if (CHECK_FLAG(bm->flags, BM_FLAG_GRACEFUL_RESTART)) { + if (!uj) + vty_out(vty, "BGP started gracefully at %s", timebuf); + else + json_object_boolean_add(json, "grComplete", + CHECK_FLAG(bm->flags, BM_FLAG_GR_COMPLETE)); + + if (CHECK_FLAG(bm->flags, BM_FLAG_GR_COMPLETE)) { + time_to_string(bm->gr_completion_time, timebuf); + if (uj) { + unix_timestamp = time(NULL) - + (monotime(NULL) - bm->gr_completion_time); + json_object_int_add(json, "grCompletedAt", unix_timestamp); + } else + vty_out(vty, "Graceful restart completed at %s", timebuf); + } else { + if (!uj) + vty_out(vty, "Graceful restart is in progress\n"); + } + } else { + if (!uj) + vty_out(vty, "BGP started at %s", timebuf); + } + + if (uj) { + json_object_boolean_add(json, "bgpInMaintenanceMode", + (CHECK_FLAG(bm->flags, BM_FLAG_MAINTENANCE_MODE))); + json_object_int_add(json, "bgpInstanceCount", listcount(bm->bgp)); + + vty_json(vty, json); + } else { + if (CHECK_FLAG(bm->flags, BM_FLAG_MAINTENANCE_MODE)) + vty_out(vty, "BGP is in Maintenance mode (BGP GSHUT is in effect)\n"); + + vty_out(vty, "Number of BGP instances 
(including default): %d\n", + listcount(bm->bgp)); + } + + return CMD_SUCCESS; +} + DEFUN (show_bgp_mac_hash, show_bgp_mac_hash_cmd, "show bgp mac hash", @@ -15779,15 +15845,15 @@ CPP_NOTICE("Remove `gracefulRestartCapability` JSON field") } /* Local address. */ - if (p->su_local) { + if (p->connection->su_local) { if (use_json) { json_object_string_addf(json_neigh, "hostLocal", "%pSU", - p->su_local); + p->connection->su_local); json_object_int_add(json_neigh, "portLocal", - ntohs(p->su_local->sin.sin_port)); + ntohs(p->connection->su_local->sin.sin_port)); } else - vty_out(vty, "Local host: %pSU, Local port: %d\n", - p->su_local, ntohs(p->su_local->sin.sin_port)); + vty_out(vty, "Local host: %pSU, Local port: %d\n", p->connection->su_local, + ntohs(p->connection->su_local->sin.sin_port)); } else { if (use_json) { json_object_string_add(json_neigh, "hostLocal", @@ -15797,16 +15863,16 @@ CPP_NOTICE("Remove `gracefulRestartCapability` JSON field") } /* Remote address. */ - if (p->su_remote) { + if (p->connection->su_remote) { if (use_json) { - json_object_string_addf(json_neigh, "hostForeign", - "%pSU", p->su_remote); + json_object_string_addf(json_neigh, "hostForeign", "%pSU", + p->connection->su_remote); json_object_int_add(json_neigh, "portForeign", - ntohs(p->su_remote->sin.sin_port)); + ntohs(p->connection->su_remote->sin.sin_port)); } else vty_out(vty, "Foreign host: %pSU, Foreign port: %d\n", - p->su_remote, - ntohs(p->su_remote->sin.sin_port)); + p->connection->su_remote, + ntohs(p->connection->su_remote->sin.sin_port)); } else { if (use_json) { json_object_string_add(json_neigh, "hostForeign", @@ -15816,7 +15882,7 @@ CPP_NOTICE("Remove `gracefulRestartCapability` JSON field") } /* Nexthop display. */ - if (p->su_local) { + if (p->connection->su_local) { if (use_json) { json_object_string_addf(json_neigh, "nexthop", "%pI4", &p->nexthop.v4); @@ -17519,12 +17585,6 @@ DEFUN (bgp_redistribute_ipv4_ospf, if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0) protocol = ZEBRA_ROUTE_OSPF; else { - if (bgp->vrf_id != VRF_DEFAULT) { - vty_out(vty, - "%% Only default BGP instance can use '%s'\n", - argv[idx_ospf_table]->arg); - return CMD_WARNING_CONFIG_FAILED; - } if (strncmp(argv[idx_ospf_table]->arg, "table-direct", strlen("table-direct")) == 0) { protocol = ZEBRA_ROUTE_TABLE_DIRECT; @@ -17578,12 +17638,6 @@ DEFUN (bgp_redistribute_ipv4_ospf_rmap, if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0) protocol = ZEBRA_ROUTE_OSPF; else { - if (bgp->vrf_id != VRF_DEFAULT) { - vty_out(vty, - "%% Only default BGP instance can use '%s'\n", - argv[idx_ospf_table]->arg); - return CMD_WARNING_CONFIG_FAILED; - } if (strncmp(argv[idx_ospf_table]->arg, "table-direct", strlen("table-direct")) == 0) { protocol = ZEBRA_ROUTE_TABLE_DIRECT; @@ -17641,12 +17695,6 @@ DEFUN (bgp_redistribute_ipv4_ospf_metric, if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0) protocol = ZEBRA_ROUTE_OSPF; else { - if (bgp->vrf_id != VRF_DEFAULT) { - vty_out(vty, - "%% Only default BGP instance can use '%s'\n", - argv[idx_ospf_table]->arg); - return CMD_WARNING_CONFIG_FAILED; - } if (strncmp(argv[idx_ospf_table]->arg, "table-direct", strlen("table-direct")) == 0) { protocol = ZEBRA_ROUTE_TABLE_DIRECT; @@ -17711,12 +17759,6 @@ DEFUN (bgp_redistribute_ipv4_ospf_rmap_metric, if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0) protocol = ZEBRA_ROUTE_OSPF; else { - if (bgp->vrf_id != VRF_DEFAULT) { - vty_out(vty, - "%% Only default BGP instance can use '%s'\n", - argv[idx_ospf_table]->arg); - return CMD_WARNING_CONFIG_FAILED; - } if 
(strncmp(argv[idx_ospf_table]->arg, "table-direct", strlen("table-direct")) == 0) { protocol = ZEBRA_ROUTE_TABLE_DIRECT; @@ -17786,13 +17828,7 @@ DEFUN (bgp_redistribute_ipv4_ospf_metric_rmap, if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0) protocol = ZEBRA_ROUTE_OSPF; else { - if (bgp->vrf_id != VRF_DEFAULT) { - vty_out(vty, - "%% Only default BGP instance can use '%s'\n", - argv[idx_ospf_table]->arg); - return CMD_WARNING_CONFIG_FAILED; - } else if (strncmp(argv[idx_ospf_table]->arg, "table-direct", - strlen("table-direct")) == 0) { + if (strncmp(argv[idx_ospf_table]->arg, "table-direct", strlen("table-direct")) == 0) { protocol = ZEBRA_ROUTE_TABLE_DIRECT; if (instance == RT_TABLE_MAIN || instance == RT_TABLE_LOCAL) { @@ -17855,12 +17891,6 @@ DEFUN (no_bgp_redistribute_ipv4_ospf, if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0) protocol = ZEBRA_ROUTE_OSPF; else { - if (bgp->vrf_id != VRF_DEFAULT) { - vty_out(vty, - "%% Only default BGP instance can use '%s'\n", - argv[idx_ospf_table]->arg); - return CMD_WARNING_CONFIG_FAILED; - } if (strncmp(argv[idx_ospf_table]->arg, "table-direct", strlen("table-direct")) == 0) { protocol = ZEBRA_ROUTE_TABLE_DIRECT; @@ -19760,14 +19790,19 @@ int bgp_config_write(struct vty *vty) } } - if (CHECK_FLAG(bgp->flags, BGP_FLAG_RR_ALLOW_OUTBOUND_POLICY)) { - vty_out(vty, - " bgp route-reflector allow-outbound-policy\n"); - } + if (!!CHECK_FLAG(bgp->flags, BGP_FLAG_RR_ALLOW_OUTBOUND_POLICY) != + SAVE_BGP_RR_ALLOW_OUTBOUND_POLICY) + vty_out(vty, " %sbgp route-reflector allow-outbound-policy\n", + CHECK_FLAG(bgp->flags, BGP_FLAG_RR_ALLOW_OUTBOUND_POLICY) ? "" + : "no "); + if (CHECK_FLAG(bgp->flags, BGP_FLAG_COMPARE_ROUTER_ID)) vty_out(vty, " bgp bestpath compare-routerid\n"); - if (CHECK_FLAG(bgp->flags, BGP_FLAG_COMPARE_AIGP)) - vty_out(vty, " bgp bestpath aigp\n"); + + if (!!CHECK_FLAG(bgp->flags, BGP_FLAG_COMPARE_AIGP) != SAVE_BGP_COMPARE_AIGP) + vty_out(vty, " %sbgp bestpath aigp\n", + CHECK_FLAG(bgp->flags, BGP_FLAG_COMPARE_AIGP) ? "" : "no "); + if (CHECK_FLAG(bgp->flags, BGP_FLAG_MED_CONFED) || CHECK_FLAG(bgp->flags, BGP_FLAG_MED_MISSING_AS_WORST)) { vty_out(vty, " bgp bestpath med"); @@ -21927,6 +21962,9 @@ void bgp_vty_init(void) /* "show [ip] bgp vrfs" commands. */ install_element(VIEW_NODE, &show_bgp_vrfs_cmd); + /* Some overall BGP information */ + install_element(VIEW_NODE, &show_bgp_router_cmd); + /* Community-list. */ community_list_vty(); diff --git a/bgpd/bgp_vty.h b/bgpd/bgp_vty.h index f88f5c8125..00a3135073 100644 --- a/bgpd/bgp_vty.h +++ b/bgpd/bgp_vty.h @@ -10,6 +10,19 @@ #include "stream.h" struct bgp; +FRR_CFG_DEFAULT_ULONG(BGP_KEEPALIVE, + { .val_ulong = 3, .match_profile = "datacenter", }, + { .val_ulong = BGP_DEFAULT_KEEPALIVE }, +); +FRR_CFG_DEFAULT_ULONG(BGP_HOLDTIME, + { .val_ulong = 9, .match_profile = "datacenter", }, + { .val_ulong = BGP_DEFAULT_HOLDTIME }, +); +FRR_CFG_DEFAULT_ULONG(BGP_CONNECT_RETRY, + { .val_ulong = 10, .match_profile = "datacenter", }, + { .val_ulong = BGP_DEFAULT_CONNECT_RETRY }, +); + #define BGP_INSTANCE_HELP_STR "BGP view\nBGP VRF\nView/VRF name\n" #define BGP_INSTANCE_ALL_HELP_STR "BGP view\nBGP VRF\nAll Views/VRFs\n" diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index e3465feda8..1669aabc60 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -953,13 +953,10 @@ bgp_path_info_to_ipv6_nexthop(struct bgp_path_info *path, ifindex_t *ifindex) *ifindex = path->attr->nh_ifindex; } else { /* Workaround for Cisco's nexthop bug. 
*/ - if (IN6_IS_ADDR_UNSPECIFIED( - &path->attr->mp_nexthop_global) - && path->peer->su_remote - && path->peer->su_remote->sa.sa_family - == AF_INET6) { - nexthop = - &path->peer->su_remote->sin6.sin6_addr; + if (IN6_IS_ADDR_UNSPECIFIED(&path->attr->mp_nexthop_global) && + path->peer->connection->su_remote && + path->peer->connection->su_remote->sa.sa_family == AF_INET6) { + nexthop = &path->peer->connection->su_remote->sin6.sin6_addr; if (IN6_IS_ADDR_LINKLOCAL(nexthop)) *ifindex = path->peer->nexthop.ifp ->ifindex; @@ -1339,7 +1336,7 @@ static void bgp_zebra_announce_parse_nexthop( * overridden on 1st nexthop */ if (mpinfo == info) { if (metric) - *metric = mpinfo_cp->attr->med; + *metric = bgp_med_value(mpinfo_cp->attr, bgp); if (tag) *tag = mpinfo_cp->attr->tag; } @@ -1677,11 +1674,23 @@ void bgp_zebra_announce_table(struct bgp *bgp, afi_t afi, safi_t safi) for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED) && - (pi->type == ZEBRA_ROUTE_BGP - && (pi->sub_type == BGP_ROUTE_NORMAL - || pi->sub_type == BGP_ROUTE_IMPORTED))) - bgp_zebra_route_install(dest, pi, bgp, true, - NULL, false); + (pi->type == ZEBRA_ROUTE_BGP && (pi->sub_type == BGP_ROUTE_NORMAL || + pi->sub_type == BGP_ROUTE_IMPORTED))) { + bool is_add = true; + + if (bgp->table_map[afi][safi].name) { + struct attr local_attr = *pi->attr; + struct bgp_path_info local_info = *pi; + + local_info.attr = &local_attr; + + is_add = bgp_table_map_apply(bgp->table_map[afi][safi].map, + bgp_dest_get_prefix(dest), + &local_info); + } + + bgp_zebra_route_install(dest, pi, bgp, is_add, NULL, false); + } } /* Announce routes of any bgp subtype of a table to zebra */ @@ -2045,11 +2054,34 @@ int bgp_redistribute_set(struct bgp *bgp, afi_t afi, int type, /* Return if already redistribute flag is set. */ if (instance) { - if (redist_check_instance(&zclient->mi_redist[afi][type], - instance)) - return CMD_WARNING; + if (type == ZEBRA_ROUTE_TABLE_DIRECT) { + /* + * When redistribution type is `table-direct` the + * instance means `table identification`. + * + * `table_id` support 32bit integers, however since + * `instance` is being overloaded to `table_id` it + * will only be possible to use the first 65535 + * entries. + * + * Also the ZAPI must also support `int` + * (see `zebra_redistribute_add`). + */ + struct redist_table_direct table = { + .table_id = instance, + .vrf_id = bgp->vrf_id, + }; + if (redist_lookup_table_direct(&zclient->mi_redist[afi][type], &table) != + NULL) + return CMD_WARNING; + + redist_add_table_direct(&zclient->mi_redist[afi][type], &table); + } else { + if (redist_check_instance(&zclient->mi_redist[afi][type], instance)) + return CMD_WARNING; - redist_add_instance(&zclient->mi_redist[afi][type], instance); + redist_add_instance(&zclient->mi_redist[afi][type], instance); + } } else { if (vrf_bitmap_check(&zclient->redist[afi][type], bgp->vrf_id)) return CMD_WARNING; @@ -2177,10 +2209,22 @@ int bgp_redistribute_unreg(struct bgp *bgp, afi_t afi, int type, /* Return if zebra connection is disabled. 
*/ if (instance) { - if (!redist_check_instance(&zclient->mi_redist[afi][type], - instance)) - return CMD_WARNING; - redist_del_instance(&zclient->mi_redist[afi][type], instance); + if (type == ZEBRA_ROUTE_TABLE_DIRECT) { + struct redist_table_direct table = { + .table_id = instance, + .vrf_id = bgp->vrf_id, + }; + if (redist_lookup_table_direct(&zclient->mi_redist[afi][type], &table) == + NULL) + return CMD_WARNING; + + redist_del_table_direct(&zclient->mi_redist[afi][type], &table); + } else { + if (!redist_check_instance(&zclient->mi_redist[afi][type], instance)) + return CMD_WARNING; + + redist_del_instance(&zclient->mi_redist[afi][type], instance); + } } else { if (!vrf_bitmap_check(&zclient->redist[afi][type], bgp->vrf_id)) return CMD_WARNING; @@ -3378,12 +3422,15 @@ static int bgp_ifp_create(struct interface *ifp) zlog_debug("Rx Intf add VRF %s IF %s", ifp->vrf->name, ifp->name); + /* We don't need to check for vrf->bgp link to add this local MAC + * to the hash table as the tenant VRF might not have the BGP instance. + */ + bgp_mac_add_mac_entry(ifp); + bgp = ifp->vrf->info; if (!bgp) return 0; - bgp_mac_add_mac_entry(ifp); - bgp_update_interface_nbrs(bgp, ifp, ifp); hook_call(bgp_vrf_status_changed, bgp, ifp); diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index 2f234e3a5a..efb2c00fa5 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -86,6 +86,7 @@ DEFINE_QOBJ_TYPE(bgp); DEFINE_QOBJ_TYPE(peer); DEFINE_HOOK(bgp_inst_delete, (struct bgp *bgp), (bgp)); DEFINE_HOOK(bgp_instance_state, (struct bgp *bgp), (bgp)); +DEFINE_HOOK(bgp_routerid_update, (struct bgp *bgp, bool withdraw), (bgp, withdraw)); /* BGP process wide configuration. */ static struct bgp_master bgp_master; @@ -301,6 +302,8 @@ static int bgp_router_id_set(struct bgp *bgp, const struct in_addr *id, vpn_handle_router_id_update(bgp, true, is_config); + hook_call(bgp_routerid_update, bgp, true); + IPV4_ADDR_COPY(&bgp->router_id, id); /* Set all peer's local identifier with this value. */ @@ -318,6 +321,7 @@ static int bgp_router_id_set(struct bgp *bgp, const struct in_addr *id, vpn_handle_router_id_update(bgp, false, is_config); + hook_call(bgp_routerid_update, bgp, false); return 0; } @@ -579,9 +583,9 @@ void bgp_timers_set(struct vty *vty, struct bgp *bgp, uint32_t keepalive, /* mostly for completeness - CLI uses its own defaults */ void bgp_timers_unset(struct bgp *bgp) { - bgp->default_keepalive = BGP_DEFAULT_KEEPALIVE; - bgp->default_holdtime = BGP_DEFAULT_HOLDTIME; - bgp->default_connect_retry = BGP_DEFAULT_CONNECT_RETRY; + bgp->default_keepalive = DFLT_BGP_KEEPALIVE; + bgp->default_holdtime = DFLT_BGP_HOLDTIME; + bgp->default_connect_retry = DFLT_BGP_CONNECT_RETRY; bgp->default_delayopen = BGP_DEFAULT_DELAYOPEN; } @@ -2022,8 +2026,11 @@ struct peer *peer_create(union sockunion *su, const char *conf_if, if (bgp->autoshutdown) peer_flag_set(peer, PEER_FLAG_SHUTDOWN); /* Set up peer's events and timers. */ - else if (!active && peer_active(peer->connection)) + else if (!active && peer_active(peer->connection)) { + if (peer->last_reset == PEER_DOWN_NOAFI_ACTIVATED) + peer->last_reset = 0; bgp_timer_set(peer->connection); + } bgp_peer_gr_flags_update(peer); BGP_GR_ROUTER_DETECT_AND_SEND_CAPABILITY_TO_ZEBRA(bgp, bgp->peer); @@ -2764,14 +2771,14 @@ int peer_delete(struct peer *peer) } /* Local and remote addresses. 
*/ - if (peer->su_local) { - sockunion_free(peer->su_local); - peer->su_local = NULL; + if (peer->connection->su_local) { + sockunion_free(peer->connection->su_local); + peer->connection->su_local = NULL; } - if (peer->su_remote) { - sockunion_free(peer->su_remote); - peer->su_remote = NULL; + if (peer->connection->su_remote) { + sockunion_free(peer->connection->su_remote); + peer->connection->su_remote = NULL; } /* Free filter related memory. */ @@ -3630,13 +3637,13 @@ struct bgp *bgp_lookup(as_t as, const char *name) } /* Lookup BGP structure by view name. */ -struct bgp *bgp_lookup_by_name(const char *name) +struct bgp *bgp_lookup_by_name_filter(const char *name, bool filter_auto) { struct bgp *bgp; struct listnode *node, *nnode; for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) { - if (CHECK_FLAG(bgp->vrf_flags, BGP_VRF_AUTO)) + if (filter_auto && CHECK_FLAG(bgp->vrf_flags, BGP_VRF_AUTO)) continue; if ((bgp->name == NULL && name == NULL) || (bgp->name && name && strcmp(bgp->name, name) == 0)) @@ -3645,6 +3652,11 @@ struct bgp *bgp_lookup_by_name(const char *name) return NULL; } +struct bgp *bgp_lookup_by_name(const char *name) +{ + return bgp_lookup_by_name_filter(name, true); +} + /* Lookup BGP instance based on VRF id. */ /* Note: Only to be used for incoming messages from Zebra. */ struct bgp *bgp_lookup_by_vrf_id(vrf_id_t vrf_id) @@ -3730,10 +3742,9 @@ int bgp_handle_socket(struct bgp *bgp, struct vrf *vrf, vrf_id_t old_vrf_id, return bgp_check_main_socket(create, bgp); } -int bgp_lookup_by_as_name_type(struct bgp **bgp_val, as_t *as, - const char *as_pretty, +int bgp_lookup_by_as_name_type(struct bgp **bgp_val, as_t *as, const char *as_pretty, enum asnotation_mode asnotation, const char *name, - enum bgp_instance_type inst_type) + enum bgp_instance_type inst_type, bool force_config) { struct bgp *bgp; struct peer *peer = NULL; @@ -3742,7 +3753,7 @@ int bgp_lookup_by_as_name_type(struct bgp **bgp_val, as_t *as, /* Multiple instance check. 
*/ if (name) - bgp = bgp_lookup_by_name(name); + bgp = bgp_lookup_by_name_filter(name, !force_config); else bgp = bgp_get_default(); @@ -3752,7 +3763,7 @@ int bgp_lookup_by_as_name_type(struct bgp **bgp_val, as_t *as, /* Handle AS number change */ if (bgp->as != *as) { if (hidden || CHECK_FLAG(bgp->vrf_flags, BGP_VRF_AUTO)) { - if (hidden) { + if (force_config == false && hidden) { bgp_create(as, name, inst_type, as_pretty, asnotation, bgp, hidden); @@ -3760,7 +3771,8 @@ int bgp_lookup_by_as_name_type(struct bgp **bgp_val, as_t *as, BGP_FLAG_INSTANCE_HIDDEN); } else { bgp->as = *as; - UNSET_FLAG(bgp->vrf_flags, BGP_VRF_AUTO); + if (force_config == false) + UNSET_FLAG(bgp->vrf_flags, BGP_VRF_AUTO); } /* Set all peer's local AS with this ASN */ @@ -3797,8 +3809,7 @@ int bgp_get(struct bgp **bgp_val, as_t *as, const char *name, struct vrf *vrf = NULL; int ret = 0; - ret = bgp_lookup_by_as_name_type(bgp_val, as, as_pretty, asnotation, - name, inst_type); + ret = bgp_lookup_by_as_name_type(bgp_val, as, as_pretty, asnotation, name, inst_type, false); if (ret || *bgp_val) return ret; @@ -3977,6 +3988,7 @@ int bgp_delete(struct bgp *bgp) uint32_t a_ann_cnt = 0, a_l2_cnt = 0, a_l3_cnt = 0; struct bgp *bgp_to_proc = NULL; struct bgp *bgp_to_proc_next = NULL; + struct bgp *bgp_default = bgp_get_default(); assert(bgp); @@ -4030,13 +4042,26 @@ int bgp_delete(struct bgp *bgp) bgp_soft_reconfig_table_task_cancel(bgp, NULL, NULL); /* make sure we withdraw any exported routes */ - vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP, bgp_get_default(), - bgp); - vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP6, bgp_get_default(), - bgp); + vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP, bgp_default, bgp); + vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP6, bgp_default, bgp); bgp_vpn_leak_unimport(bgp); + /* + * Release SRv6 SIDs, like it's done in `vpn_leak_postchange()` + * and bgp_sid_vpn_export_cmd/af_sid_vpn_export_cmd commands. 
+ */ + bgp->tovpn_sid_index = 0; + UNSET_FLAG(bgp->vrf_flags, BGP_VRF_TOVPN_SID_AUTO); + delete_vrf_tovpn_sid_per_vrf(bgp_default, bgp); + for (afi = AFI_IP; afi < AFI_MAX; afi++) { + bgp->vpn_policy[afi].tovpn_sid_index = 0; + UNSET_FLAG(bgp->vpn_policy[afi].flags, BGP_VPN_POLICY_TOVPN_SID_AUTO); + delete_vrf_tovpn_sid_per_af(bgp_default, bgp, afi); + + vpn_leak_zebra_vrf_sid_withdraw(bgp, afi); + } + bgp_vpn_release_label(bgp, AFI_IP, true); bgp_vpn_release_label(bgp, AFI_IP6, true); @@ -4812,39 +4837,40 @@ static const struct peer_flag_action peer_flag_action_list[] = { {0, 0, 0}}; static const struct peer_flag_action peer_af_flag_action_list[] = { - {PEER_FLAG_SEND_COMMUNITY, 1, peer_change_reset_out}, - {PEER_FLAG_SEND_EXT_COMMUNITY, 1, peer_change_reset_out}, - {PEER_FLAG_SEND_LARGE_COMMUNITY, 1, peer_change_reset_out}, - {PEER_FLAG_NEXTHOP_SELF, 1, peer_change_reset_out}, - {PEER_FLAG_REFLECTOR_CLIENT, 1, peer_change_reset}, - {PEER_FLAG_RSERVER_CLIENT, 1, peer_change_reset}, - {PEER_FLAG_SOFT_RECONFIG, 0, peer_change_reset_in}, - {PEER_FLAG_AS_PATH_UNCHANGED, 1, peer_change_reset_out}, - {PEER_FLAG_NEXTHOP_UNCHANGED, 1, peer_change_reset_out}, - {PEER_FLAG_MED_UNCHANGED, 1, peer_change_reset_out}, - {PEER_FLAG_DEFAULT_ORIGINATE, 0, peer_change_none}, - {PEER_FLAG_REMOVE_PRIVATE_AS, 1, peer_change_reset_out}, - {PEER_FLAG_ALLOWAS_IN, 0, peer_change_reset_in}, - {PEER_FLAG_ALLOWAS_IN_ORIGIN, 0, peer_change_reset_in}, - {PEER_FLAG_ORF_PREFIX_SM, 1, peer_change_reset}, - {PEER_FLAG_ORF_PREFIX_RM, 1, peer_change_reset}, - {PEER_FLAG_MAX_PREFIX, 0, peer_change_none}, - {PEER_FLAG_MAX_PREFIX_WARNING, 0, peer_change_none}, - {PEER_FLAG_MAX_PREFIX_FORCE, 0, peer_change_none}, - {PEER_FLAG_MAX_PREFIX_OUT, 0, peer_change_none}, - {PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED, 0, peer_change_reset_out}, - {PEER_FLAG_FORCE_NEXTHOP_SELF, 1, peer_change_reset_out}, - {PEER_FLAG_REMOVE_PRIVATE_AS_ALL, 1, peer_change_reset_out}, - {PEER_FLAG_REMOVE_PRIVATE_AS_REPLACE, 1, peer_change_reset_out}, - {PEER_FLAG_AS_OVERRIDE, 1, peer_change_reset_out}, - {PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE, 1, peer_change_reset_out}, - {PEER_FLAG_WEIGHT, 0, peer_change_reset_in}, - {PEER_FLAG_DISABLE_ADDPATH_RX, 0, peer_change_none}, - {PEER_FLAG_SOO, 0, peer_change_reset}, - {PEER_FLAG_ACCEPT_OWN, 0, peer_change_reset}, - {PEER_FLAG_SEND_EXT_COMMUNITY_RPKI, 1, peer_change_reset_out}, - {PEER_FLAG_ADDPATH_RX_PATHS_LIMIT, 0, peer_change_none}, - {0, 0, 0}}; + { PEER_FLAG_SEND_COMMUNITY, 1, peer_change_reset_out }, + { PEER_FLAG_SEND_EXT_COMMUNITY, 1, peer_change_reset_out }, + { PEER_FLAG_SEND_LARGE_COMMUNITY, 1, peer_change_reset_out }, + { PEER_FLAG_NEXTHOP_SELF, 1, peer_change_reset_out }, + { PEER_FLAG_REFLECTOR_CLIENT, 1, peer_change_reset }, + { PEER_FLAG_RSERVER_CLIENT, 1, peer_change_reset }, + { PEER_FLAG_SOFT_RECONFIG, 0, peer_change_reset_in }, + { PEER_FLAG_AS_PATH_UNCHANGED, 1, peer_change_reset_out }, + { PEER_FLAG_NEXTHOP_UNCHANGED, 1, peer_change_reset_out }, + { PEER_FLAG_MED_UNCHANGED, 1, peer_change_reset_out }, + { PEER_FLAG_DEFAULT_ORIGINATE, 0, peer_change_none }, + { PEER_FLAG_REMOVE_PRIVATE_AS, 1, peer_change_reset_out }, + { PEER_FLAG_ALLOWAS_IN, 0, peer_change_reset_in }, + { PEER_FLAG_ALLOWAS_IN_ORIGIN, 0, peer_change_reset_in }, + { PEER_FLAG_ORF_PREFIX_SM, 1, peer_change_reset }, + { PEER_FLAG_ORF_PREFIX_RM, 1, peer_change_reset }, + { PEER_FLAG_MAX_PREFIX, 0, peer_change_none }, + { PEER_FLAG_MAX_PREFIX_WARNING, 0, peer_change_none }, + { PEER_FLAG_MAX_PREFIX_FORCE, 0, peer_change_none }, + { 
PEER_FLAG_MAX_PREFIX_OUT, 0, peer_change_none }, + { PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED, 0, peer_change_reset_out }, + { PEER_FLAG_FORCE_NEXTHOP_SELF, 1, peer_change_reset_out }, + { PEER_FLAG_REMOVE_PRIVATE_AS_ALL, 1, peer_change_reset_out }, + { PEER_FLAG_REMOVE_PRIVATE_AS_REPLACE, 1, peer_change_reset_out }, + { PEER_FLAG_AS_OVERRIDE, 1, peer_change_reset_out }, + { PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE, 1, peer_change_reset_out }, + { PEER_FLAG_WEIGHT, 0, peer_change_reset_in }, + { PEER_FLAG_DISABLE_ADDPATH_RX, 0, peer_change_reset }, + { PEER_FLAG_SOO, 0, peer_change_reset }, + { PEER_FLAG_ACCEPT_OWN, 0, peer_change_reset }, + { PEER_FLAG_SEND_EXT_COMMUNITY_RPKI, 1, peer_change_reset_out }, + { PEER_FLAG_ADDPATH_RX_PATHS_LIMIT, 0, peer_change_none }, + { 0, 0, 0 } +}; /* Proper action set. */ static int peer_flag_action_set(const struct peer_flag_action *action_list, @@ -4949,6 +4975,10 @@ static void peer_flag_modify_action(struct peer *peer, uint64_t flag) peer->v_start = BGP_INIT_START_TIMER; BGP_EVENT_ADD(peer->connection, BGP_Stop); } + } else if (CHECK_FLAG(peer->cap, PEER_CAP_DYNAMIC_RCV) && + CHECK_FLAG(peer->cap, PEER_CAP_DYNAMIC_ADV) && + flag == PEER_FLAG_CAPABILITY_ENHE) { + peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE; } else if (!peer_notify_config_change(peer->connection)) bgp_session_reset(peer); } @@ -6686,7 +6716,7 @@ int peer_allowas_in_set(struct peer *peer, afi_t afi, safi_t safi, SET_FLAG(member->af_flags[afi][safi], PEER_FLAG_ALLOWAS_IN_ORIGIN); member->allowas_in[afi][safi] = 0; - peer_on_policy_change(peer, afi, safi, 0); + peer_on_policy_change(member, afi, safi, 0); } } else { if (member->allowas_in[afi][safi] != allow_num @@ -6695,7 +6725,7 @@ int peer_allowas_in_set(struct peer *peer, afi_t afi, safi_t safi, UNSET_FLAG(member->af_flags[afi][safi], PEER_FLAG_ALLOWAS_IN_ORIGIN); member->allowas_in[afi][safi] = allow_num; - peer_on_policy_change(peer, afi, safi, 0); + peer_on_policy_change(member, afi, safi, 0); } } } diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index 47214e52e5..96a78e6662 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -907,6 +907,9 @@ DECLARE_HOOK(bgp_config_end, (struct bgp *bgp), (bgp)); DECLARE_HOOK(bgp_hook_vrf_update, (struct vrf *vrf, bool enabled), (vrf, enabled)); DECLARE_HOOK(bgp_instance_state, (struct bgp *bgp), (bgp)); +DECLARE_HOOK(bgp_routerid_update, (struct bgp *bgp, bool withdraw), (bgp, withdraw)); +DECLARE_HOOK(bgp_route_distinguisher_update, (struct bgp *bgp, afi_t afi, bool preconfig), + (bgp, afi, preconfig)); /* Thread callback information */ struct afi_safi_info { @@ -1255,6 +1258,9 @@ struct peer_connection { union sockunion su; #define BGP_CONNECTION_SU_UNSPEC(connection) \ (connection->su.sa.sa_family == AF_UNSPEC) + + union sockunion *su_local; /* Sockunion of local address. */ + union sockunion *su_remote; /* Sockunion of remote address. */ }; extern struct peer_connection *bgp_peer_connection_new(struct peer *peer); extern void bgp_peer_connection_free(struct peer_connection **connection); @@ -1347,8 +1353,6 @@ struct peer { char *update_if; union sockunion *update_source; - union sockunion *su_local; /* Sockunion of local address. */ - union sockunion *su_remote; /* Sockunion of remote address. */ bool shared_network; /* Is this peer shared same network. 
*/ struct bgp_nexthop nexthop; /* Nexthop */ @@ -2281,6 +2285,7 @@ extern void bgp_zclient_reset(void); extern struct bgp *bgp_get_default(void); extern struct bgp *bgp_lookup(as_t, const char *); extern struct bgp *bgp_lookup_by_name(const char *); +extern struct bgp *bgp_lookup_by_name_filter(const char *name, bool filter_auto); extern struct bgp *bgp_lookup_by_vrf_id(vrf_id_t); extern struct bgp *bgp_get_evpn(void); extern void bgp_set_evpn(struct bgp *bgp); @@ -2855,11 +2860,9 @@ extern struct peer *peer_new(struct bgp *bgp); extern struct peer *peer_lookup_in_view(struct vty *vty, struct bgp *bgp, const char *ip_str, bool use_json); -extern int bgp_lookup_by_as_name_type(struct bgp **bgp_val, as_t *as, - const char *as_pretty, - enum asnotation_mode asnotation, - const char *name, - enum bgp_instance_type inst_type); +extern int bgp_lookup_by_as_name_type(struct bgp **bgp_val, as_t *as, const char *as_pretty, + enum asnotation_mode asnotation, const char *name, + enum bgp_instance_type inst_type, bool force_config); /* Hooks */ DECLARE_HOOK(bgp_vrf_status_changed, (struct bgp *bgp, struct interface *ifp), diff --git a/bgpd/rfapi/rfapi.c b/bgpd/rfapi/rfapi.c index 61d154f1b4..241cbcb359 100644 --- a/bgpd/rfapi/rfapi.c +++ b/bgpd/rfapi/rfapi.c @@ -1029,8 +1029,8 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */ rfapiPrintBi(NULL, new); } - bgp_aggregate_increment(bgp, p, new, afi, safi); bgp_path_info_add(bn, new); + bgp_aggregate_increment(bgp, p, new, afi, safi); if (safi == SAFI_MPLS_VPN) { struct bgp_dest *pdest = NULL; diff --git a/bgpd/rfapi/rfapi_import.c b/bgpd/rfapi/rfapi_import.c index 44dfc88cf7..99d8bcfce4 100644 --- a/bgpd/rfapi/rfapi_import.c +++ b/bgpd/rfapi/rfapi_import.c @@ -1931,8 +1931,8 @@ static void rfapiBgpInfoAttachSorted(struct agg_node *rn, if (VNC_DEBUG(IMPORT_BI_ATTACH)) { vnc_zlog_debug_verbose("%s: info_new->peer=%p", __func__, info_new->peer); - vnc_zlog_debug_verbose("%s: info_new->peer->su_remote=%p", - __func__, info_new->peer->su_remote); + vnc_zlog_debug_verbose("%s: info_new->peer->su_remote=%p", __func__, + info_new->peer->connection->su_remote); } for (prev = NULL, next = rn->info; next; diff --git a/configure.ac b/configure.ac index e8036fcff1..e04c0b6d46 100644 --- a/configure.ac +++ b/configure.ac @@ -375,26 +375,45 @@ fi AM_CONDITIONAL([SCRIPTING], [test "$enable_scripting" = "yes"]) if test "$enable_scripting" = "yes"; then - AX_PROG_LUA([5.3], [5.4], [], [ - AC_MSG_ERROR([Lua 5.3 is required to build with Lua support. No other version is supported.]) + AX_PROG_LUA([5.3], [], [], [ + AC_MSG_ERROR([Lua >= 5.3 is required to build with Lua support. No other version is supported.]) ]) AX_LUA_HEADERS([], [ - AC_MSG_ERROR([Lua 5.3 headers are required to build with Lua support. No other version is supported.]) + AC_MSG_ERROR([Lua >= 5.3 headers are required to build with Lua support. No other version is supported.]) ]) - PKG_CHECK_MODULES([LUA], [lua5.3], [ - AC_DEFINE([HAVE_SCRIPTING], [1], [Have support for scripting]) - LIBS="$LIBS $LUA_LIBS" - SCRIPTING=true - ], [ - AX_LUA_LIBS([ - AC_DEFINE([HAVE_SCRIPTING], [1], [Have support for scripting]) - LIBS="$LIBS $LUA_LIB" - SCRIPTING=true - ], [ - SCRIPTING=false - AC_MSG_ERROR([Lua 5.3 libraries are required to build with Lua support. 
No other version is supported.]) - ]) - ]) + + for version in 5.3 5.4; do + PKG_CHECK_MODULES([LUA], [lua >= $version], [ + AC_DEFINE([HAVE_SCRIPTING], [1], [Have support for scripting]) + LIBS="$LIBS $LUA_LIBS" + SCRIPTING=true + break + ], [ + PKG_CHECK_MODULES([LUA], [lua$version], [ + AC_DEFINE([HAVE_SCRIPTING], [1], [Have support for scripting]) + LIBS="$LIBS $LUA_LIBS" + SCRIPTING=true + break + ], [ + PKG_CHECK_MODULES([LUA], [lua-$version], [ + AC_DEFINE([HAVE_SCRIPTING], [1], [Have support for scripting]) + LIBS="$LIBS $LUA_LIBS" + SCRIPTING=true + break + ], []) + ]) + ]) + done + + if [ "$SCRIPTING" != "true" ]; then + AX_LUA_LIBS([ + AC_DEFINE([HAVE_SCRIPTING], [1], [Have support for scripting]) + LIBS="$LIBS $LUA_LIB" + SCRIPTING=true + ], [ + AC_MSG_ERROR([Lua >= 5.3 libraries are required to build with Lua support. No other version is supported.]) + ]) + fi fi dnl the following flags go in CFLAGS rather than AC_CFLAGS since they make diff --git a/doc/developer/building-doc.rst b/doc/developer/building-doc.rst new file mode 100644 index 0000000000..bf0544ccc8 --- /dev/null +++ b/doc/developer/building-doc.rst @@ -0,0 +1,62 @@ +Building Documentation +====================== + +To build FRR documentation, first install the dependencies. +Notice that if you plan to only build html documenation, you only +need the package ``python3-sphinx``. + +.. code-block:: console + + sudo apt-get install -y python3-sphinx \ + texlive-latex-base texlive-latex-extra latexmk + +To prepate for building both user and developer documentation, do: + +.. code-block:: console + + cd doc + make + +User documentation +------------------ + +To build html user documentation: + +.. code-block:: console + + cd user + make html + +This will generate html documentation files under ``_build/html/``. +With the main page named ``index.html``. + +PFD can then be built by: + +.. code-block:: console + + cd user + make pdf + +The generated PDF file will be saved at ``_build/latex/FRR.pdf`` + +Developer documentation +----------------------- + +To build the developer documentation: + +.. code-block:: console + + cd developer + make html + +This will generate html documentation files under ``_build/html/``. +With the main page named ``index.html``. + +PFD can then be built by: + +.. code-block:: console + + cd developer + make pdf + +The generated PDF file will be saved at ``_build/latex/FRR.pdf`` diff --git a/doc/developer/building-frr-for-alpine.rst b/doc/developer/building-frr-for-alpine.rst index 68e58c9d76..a5ce636ebb 100644 --- a/doc/developer/building-frr-for-alpine.rst +++ b/doc/developer/building-frr-for-alpine.rst @@ -47,11 +47,11 @@ Build apk packages ./docker/alpine/build.sh -This will put the apk packages in: +This will put the apk packages into the architecture folder in: :: - ./docker/pkgs/apk/x86_64/ + ./docker/alpine/pkgs/apk/ Usage ----- diff --git a/doc/developer/building-frr-for-ubuntu2004.rst b/doc/developer/building-frr-for-ubuntu2004.rst index 3db97c4b2d..19353e317d 100644 --- a/doc/developer/building-frr-for-ubuntu2004.rst +++ b/doc/developer/building-frr-for-ubuntu2004.rst @@ -1,163 +1,4 @@ Ubuntu 20.04 LTS ================ -This document describes installation from source. If you want to build a -``deb``, see :ref:`packaging-debian`. - -Installing Dependencies ------------------------ - -.. 
code-block:: console - - sudo apt update - sudo apt-get install \ - git autoconf automake libtool make libreadline-dev texinfo \ - pkg-config libpam0g-dev libjson-c-dev bison flex \ - libc-ares-dev python3-dev python3-sphinx \ - install-info build-essential libsnmp-dev perl \ - protobuf-c-compiler libprotobuf-c-dev \ - libcap-dev libelf-dev libunwind-dev - -.. include:: building-libunwind-note.rst - -.. include:: building-libyang.rst - -GRPC -^^^^ -If GRPC is enabled using ``--enable-grpc`` the following packages should be -installed. - -.. code-block:: console - - sudo apt-get install libgrpc++-dev protobuf-compiler-grpc - - -Config Rollbacks -^^^^^^^^^^^^^^^^ - -If config rollbacks are enabled using ``--enable-config-rollbacks`` -the sqlite3 developer package also should be installed. - -.. code-block:: console - - sudo apt install libsqlite3-dev - - -ZeroMQ -^^^^^^ - -.. code-block:: console - - sudo apt-get install libzmq5 libzmq3-dev - -Building & Installing FRR -------------------------- - -Add FRR user and groups -^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: console - - sudo groupadd -r -g 92 frr - sudo groupadd -r -g 85 frrvty - sudo adduser --system --ingroup frr --home /var/run/frr/ \ - --gecos "FRR suite" --shell /sbin/nologin frr - sudo usermod -a -G frrvty frr - -Compile -^^^^^^^ - -.. include:: include-compile.rst - -Install FRR configuration files -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: console - - sudo install -m 775 -o frr -g frr -d /var/log/frr - sudo install -m 775 -o frr -g frrvty -d /etc/frr - sudo install -m 640 -o frr -g frrvty tools/etc/frr/vtysh.conf /etc/frr/vtysh.conf - sudo install -m 640 -o frr -g frr tools/etc/frr/frr.conf /etc/frr/frr.conf - sudo install -m 640 -o frr -g frr tools/etc/frr/daemons.conf /etc/frr/daemons.conf - sudo install -m 640 -o frr -g frr tools/etc/frr/daemons /etc/frr/daemons - -Tweak sysctls -^^^^^^^^^^^^^ - -Some sysctls need to be changed in order to enable IPv4/IPv6 forwarding and -MPLS (if supported by your platform). If your platform does not support MPLS, -skip the MPLS related configuration in this section. - -Edit :file:`/etc/sysctl.conf` and uncomment the following values (ignore the -other settings): - -:: - - # Uncomment the next line to enable packet forwarding for IPv4 - net.ipv4.ip_forward=1 - - # Uncomment the next line to enable packet forwarding for IPv6 - # Enabling this option disables Stateless Address Autoconfiguration - # based on Router Advertisements for this host - net.ipv6.conf.all.forwarding=1 - -Reboot or use ``sysctl -p`` to apply the same config to the running system. - -Add MPLS kernel modules -""""""""""""""""""""""" - -Ubuntu 20.04 ships with kernel 5.4; MPLS modules are present by default. To -enable, add the following lines to :file:`/etc/modules-load.d/modules.conf`: - -:: - - # Load MPLS Kernel Modules - mpls_router - mpls_iptunnel - - -And load the kernel modules on the running system: - -.. code-block:: console - - sudo modprobe mpls-router mpls-iptunnel - -If the above command returns an error, you may need to install the appropriate -or latest linux-modules-extra-<kernel-version>-generic package. For example -``apt-get install linux-modules-extra-`uname -r`-generic`` - -Enable MPLS Forwarding -"""""""""""""""""""""" - -Edit :file:`/etc/sysctl.conf` and the following lines. Make sure to add a line -equal to :file:`net.mpls.conf.eth0.input` for each interface used with MPLS. 
- -:: - - # Enable MPLS Label processing on all interfaces - net.mpls.conf.eth0.input=1 - net.mpls.conf.eth1.input=1 - net.mpls.conf.eth2.input=1 - net.mpls.platform_labels=100000 - -Install service files -^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: console - - sudo install -m 644 tools/frr.service /etc/systemd/system/frr.service - sudo systemctl enable frr - -Enable daemons -^^^^^^^^^^^^^^ - -Open :file:`/etc/frr/daemons` with your text editor of choice. Look for the -section with ``watchfrr_enable=...`` and ``zebra=...`` etc. Enable the daemons -as required by changing the value to ``yes``. - -Start FRR -^^^^^^^^^ - -.. code-block:: shell - - systemctl start frr +.. include:: building-frr-for-ubuntu2x04.rst diff --git a/doc/developer/building-frr-for-ubuntu2204.rst b/doc/developer/building-frr-for-ubuntu2204.rst index c898c3cd2c..726cf0a911 100644 --- a/doc/developer/building-frr-for-ubuntu2204.rst +++ b/doc/developer/building-frr-for-ubuntu2204.rst @@ -1,164 +1,4 @@ Ubuntu 22.04 LTS ================ -This document describes installation from source. If you want to build a -``deb``, see :ref:`packaging-debian`. - -Installing Dependencies ------------------------ - -.. code-block:: console - - sudo apt update - sudo apt-get install \ - git autoconf automake libtool make libreadline-dev texinfo \ - pkg-config libpam0g-dev libjson-c-dev bison flex \ - libc-ares-dev python3-dev python3-sphinx \ - install-info build-essential libsnmp-dev perl \ - libcap-dev libelf-dev libunwind-dev \ - protobuf-c-compiler libprotobuf-c-dev - -.. include:: building-libunwind-note.rst - -.. include:: building-libyang.rst - -GRPC -^^^^ -If GRPC is enabled using ``--enable-grpc`` the following packages should be -installed. - -.. code-block:: console - - sudo apt-get install libgrpc++-dev protobuf-compiler-grpc - - -Config Rollbacks -^^^^^^^^^^^^^^^^ - -If config rollbacks are enabled using ``--enable-config-rollbacks`` -the sqlite3 developer package also should be installed. - -.. code-block:: console - - sudo apt install libsqlite3-dev - - -ZeroMQ -^^^^^^ -This is optional - -.. code-block:: console - - sudo apt-get install libzmq5 libzmq3-dev - -Building & Installing FRR -------------------------- - -Add FRR user and groups -^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: console - - sudo groupadd -r -g 92 frr - sudo groupadd -r -g 85 frrvty - sudo adduser --system --ingroup frr --home /var/run/frr/ \ - --gecos "FRR suite" --shell /sbin/nologin frr - sudo usermod -a -G frrvty frr - -Compile -^^^^^^^ - -.. include:: include-compile.rst - -Install FRR configuration files -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: console - - sudo install -m 775 -o frr -g frr -d /var/log/frr - sudo install -m 775 -o frr -g frrvty -d /etc/frr - sudo install -m 640 -o frr -g frrvty tools/etc/frr/vtysh.conf /etc/frr/vtysh.conf - sudo install -m 640 -o frr -g frr tools/etc/frr/frr.conf /etc/frr/frr.conf - sudo install -m 640 -o frr -g frr tools/etc/frr/daemons.conf /etc/frr/daemons.conf - sudo install -m 640 -o frr -g frr tools/etc/frr/daemons /etc/frr/daemons - -Tweak sysctls -^^^^^^^^^^^^^ - -Some sysctls need to be changed in order to enable IPv4/IPv6 forwarding and -MPLS (if supported by your platform). If your platform does not support MPLS, -skip the MPLS related configuration in this section. 
- -Edit :file:`/etc/sysctl.conf` and uncomment the following values (ignore the -other settings): - -:: - - # Uncomment the next line to enable packet forwarding for IPv4 - net.ipv4.ip_forward=1 - - # Uncomment the next line to enable packet forwarding for IPv6 - # Enabling this option disables Stateless Address Autoconfiguration - # based on Router Advertisements for this host - net.ipv6.conf.all.forwarding=1 - -Reboot or use ``sysctl -p`` to apply the same config to the running system. - -Add MPLS kernel modules -""""""""""""""""""""""" - -Ubuntu 20.04 ships with kernel 5.4; MPLS modules are present by default. To -enable, add the following lines to :file:`/etc/modules-load.d/modules.conf`: - -:: - - # Load MPLS Kernel Modules - mpls_router - mpls_iptunnel - - -And load the kernel modules on the running system: - -.. code-block:: console - - sudo modprobe mpls-router mpls-iptunnel - -If the above command returns an error, you may need to install the appropriate -or latest linux-modules-extra-<kernel-version>-generic package. For example -``apt-get install linux-modules-extra-`uname -r`-generic`` - -Enable MPLS Forwarding -"""""""""""""""""""""" - -Edit :file:`/etc/sysctl.conf` and the following lines. Make sure to add a line -equal to :file:`net.mpls.conf.eth0.input` for each interface used with MPLS. - -:: - - # Enable MPLS Label processing on all interfaces - net.mpls.conf.eth0.input=1 - net.mpls.conf.eth1.input=1 - net.mpls.conf.eth2.input=1 - net.mpls.platform_labels=100000 - -Install service files -^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: console - - sudo install -m 644 tools/frr.service /etc/systemd/system/frr.service - sudo systemctl enable frr - -Enable daemons -^^^^^^^^^^^^^^ - -Open :file:`/etc/frr/daemons` with your text editor of choice. Look for the -section with ``watchfrr_enable=...`` and ``zebra=...`` etc. Enable the daemons -as required by changing the value to ``yes``. - -Start FRR -^^^^^^^^^ - -.. code-block:: shell - - systemctl start frr +.. include:: building-frr-for-ubuntu2x04.rst diff --git a/doc/developer/building-frr-for-ubuntu2404.rst b/doc/developer/building-frr-for-ubuntu2404.rst new file mode 100644 index 0000000000..e6b264993c --- /dev/null +++ b/doc/developer/building-frr-for-ubuntu2404.rst @@ -0,0 +1,4 @@ +Ubuntu 24.04 LTS +================ + +.. include:: building-frr-for-ubuntu2x04.rst diff --git a/doc/developer/building-frr-for-ubuntu2x04.rst b/doc/developer/building-frr-for-ubuntu2x04.rst new file mode 100644 index 0000000000..78b45e141e --- /dev/null +++ b/doc/developer/building-frr-for-ubuntu2x04.rst @@ -0,0 +1,162 @@ + +This document describes installation from source. If you want to build a +``deb``, see :ref:`packaging-debian`. + +Installing Dependencies +----------------------- + +.. code-block:: console + + sudo apt update + sudo apt-get install \ + git autoconf automake libtool make libreadline-dev texinfo \ + pkg-config libpam0g-dev libjson-c-dev bison flex \ + libc-ares-dev python3-dev python3-sphinx \ + install-info build-essential libsnmp-dev perl \ + libcap-dev libelf-dev libunwind-dev \ + protobuf-c-compiler libprotobuf-c-dev + +.. include:: building-libunwind-note.rst + +.. include:: building-libyang.rst + +GRPC +^^^^ +If GRPC is enabled using ``--enable-grpc`` the following packages should be +installed. + +.. 
code-block:: console + + sudo apt-get install libgrpc++-dev protobuf-compiler-grpc + + +Config Rollbacks +^^^^^^^^^^^^^^^^ + +If config rollbacks are enabled using ``--enable-config-rollbacks`` +the sqlite3 developer package also should be installed. + +.. code-block:: console + + sudo apt install libsqlite3-dev + + +ZeroMQ +^^^^^^ +This is optional + +.. code-block:: console + + sudo apt-get install libzmq5 libzmq3-dev + +Building & Installing FRR +------------------------- + +Add FRR user and groups +^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: console + + sudo groupadd -r -g 92 frr + sudo groupadd -r -g 85 frrvty + sudo adduser --system --ingroup frr --home /var/run/frr/ \ + --gecos "FRR suite" --shell /sbin/nologin frr + sudo usermod -a -G frrvty frr + +Compile +^^^^^^^ + +.. include:: include-compile.rst + +Install FRR configuration files +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: console + + sudo install -m 775 -o frr -g frr -d /var/log/frr + sudo install -m 775 -o frr -g frrvty -d /etc/frr + sudo install -m 640 -o frr -g frrvty tools/etc/frr/vtysh.conf /etc/frr/vtysh.conf + sudo install -m 640 -o frr -g frr tools/etc/frr/frr.conf /etc/frr/frr.conf + sudo install -m 640 -o frr -g frr tools/etc/frr/daemons.conf /etc/frr/daemons.conf + sudo install -m 640 -o frr -g frr tools/etc/frr/daemons /etc/frr/daemons + +Tweak sysctls +^^^^^^^^^^^^^ + +Some sysctls need to be changed in order to enable IPv4/IPv6 forwarding and +MPLS (if supported by your platform). If your platform does not support MPLS, +skip the MPLS related configuration in this section. + +Edit :file:`/etc/sysctl.conf` and uncomment the following values (ignore the +other settings): + +:: + + # Uncomment the next line to enable packet forwarding for IPv4 + net.ipv4.ip_forward=1 + + # Uncomment the next line to enable packet forwarding for IPv6 + # Enabling this option disables Stateless Address Autoconfiguration + # based on Router Advertisements for this host + net.ipv6.conf.all.forwarding=1 + +Reboot or use ``sysctl -p`` to apply the same config to the running system. + +Add MPLS kernel modules +""""""""""""""""""""""" + +Ubuntu 20.04 ships with kernel 5.4; MPLS modules are present by default. To +enable, add the following lines to :file:`/etc/modules-load.d/modules.conf`: + +:: + + # Load MPLS Kernel Modules + mpls_router + mpls_iptunnel + + +And load the kernel modules on the running system: + +.. code-block:: console + + sudo modprobe mpls-router mpls-iptunnel + +If the above command returns an error, you may need to install the appropriate +or latest linux-modules-extra-<kernel-version>-generic package. For example +``apt-get install linux-modules-extra-`uname -r`-generic`` + +Enable MPLS Forwarding +"""""""""""""""""""""" + +Edit :file:`/etc/sysctl.conf` and the following lines. Make sure to add a line +equal to :file:`net.mpls.conf.eth0.input` for each interface used with MPLS. + +:: + + # Enable MPLS Label processing on all interfaces + net.mpls.conf.eth0.input=1 + net.mpls.conf.eth1.input=1 + net.mpls.conf.eth2.input=1 + net.mpls.platform_labels=100000 + +Install service files +^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: console + + sudo install -m 644 tools/frr.service /etc/systemd/system/frr.service + sudo systemctl enable frr + +Enable daemons +^^^^^^^^^^^^^^ + +Open :file:`/etc/frr/daemons` with your text editor of choice. Look for the +section with ``watchfrr_enable=...`` and ``zebra=...`` etc. Enable the daemons +as required by changing the value to ``yes``. + +Start FRR +^^^^^^^^^ + +.. 
code-block:: shell + + systemctl start frr diff --git a/doc/developer/mgmtd-dev.rst b/doc/developer/mgmtd-dev.rst index 4c56cadb28..6cbd617f8c 100644 --- a/doc/developer/mgmtd-dev.rst +++ b/doc/developer/mgmtd-dev.rst @@ -160,14 +160,19 @@ Back-End Interface: should be destroyed with a call to `mgmt_be_client_destroy` and to be safe NULL out the global `mgmt_be_client` variable. -#. In ``mgmtd/mgmt_be_adapter.c`` add xpath prefix mappings to a one or both - mapping arrays (``be_client_config_xpaths`` and ``be_client_oper_xpaths``) to - direct ``mgmtd`` to send config and oper-state requests to your daemon. NOTE: - make sure to include library supported xpaths prefixes as well (e.g., +#. In ``mgmtd/mgmt_be_adapter.c`` add xpath prefix mappings to a each of the + mapping arrays (``be_client_config_xpaths``, ``be_client_oper_xpaths``, and + ``be_client_rpc_xpaths``) to direct ``mgmtd`` to send config, oper-state, and + RPC requests to your daemon. + + NOTE: make sure to include library supported xpaths prefixes as well (e.g., "/frr-interface:lib"). A good way to figure these paths out are to look in each of the YANG modules that the daemon uses and include each of their paths in the array. +#. In ``python/xref2vtysh.py`` add ``VTYSH_xxxD`` (for client xxx) to + ``lib/mgmt_be_client.c`` entry in the ``daemon_falgs`` dictionary. + Add YANG and CLI into MGMTD ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/developer/packaging-redhat.rst b/doc/developer/packaging-redhat.rst index 8037873461..277de5d16f 100644 --- a/doc/developer/packaging-redhat.rst +++ b/doc/developer/packaging-redhat.rst @@ -3,7 +3,7 @@ Packaging Red Hat ================= -Tested on CentOS 6, CentOS 7, CentOS 8 and Fedora 24. +Tested on CentOS 6, CentOS 7, Rocky 8 and Fedora 24. 1. On CentOS 6, refer to :ref:`building-centos6` for details on installing sufficiently up-to-date package versions to enable building FRR. @@ -18,14 +18,14 @@ Tested on CentOS 6, CentOS 7, CentOS 8 and Fedora 24. yum install rpm-build net-snmp-devel pam-devel libcap-devel - For CentOS 7 and CentOS 8, the package will be built using python3 + For CentOS 7 and Rocky 8, the package will be built using python3 and requires additional python3 packages:: yum install python3-devel python3-sphinx .. note:: - For CentOS 8 you need to install ``platform-python-devel`` package + For Rocky 8 you need to install ``platform-python-devel`` package to provide ``/usr/bin/pathfix.py``:: yum install platform-python-devel @@ -33,7 +33,7 @@ Tested on CentOS 6, CentOS 7, CentOS 8 and Fedora 24. If ``yum`` is not present on your system, use ``dnf`` instead. - You should enable ``PowerTools`` repo if using CentOS 8 which + You should enable ``PowerTools`` repo if using Rocky 8 which is disabled by default. 4. Checkout FRR:: @@ -88,11 +88,18 @@ Tested on CentOS 6, CentOS 7, CentOS 8 and Fedora 24. %{!?with_watchfrr: %global with_watchfrr 1 } %{!?with_pathd: %global with_pathd 1 } %{!?with_grpc: %global with_grpc 0 } + %{!?with_rpki: %global with_rpki 1 } + %{!?with_docs: %global with_docs 1 } 8. 
Build the RPM:: rpmbuild --define "_topdir `pwd`/rpmbuild" -ba rpmbuild/SPECS/frr.spec + To override :file:`rpm/SPECS/frr.spec` defaults on the rpmbuild + commandline with: + + rpmbuild --define 'variable value' + If building with RPKI, then download and install the additional RPKI packages from https://ci1.netdef.org/browse/RPKI-RTRLIB/latestSuccessful/artifact diff --git a/doc/developer/subdir.am b/doc/developer/subdir.am index 67f42bc11f..9f6986cd33 100644 --- a/doc/developer/subdir.am +++ b/doc/developer/subdir.am @@ -28,6 +28,7 @@ dev_RSTFILES = \ doc/developer/building-frr-for-ubuntu1804.rst \ doc/developer/building-frr-for-ubuntu2004.rst \ doc/developer/building-frr-for-ubuntu2204.rst \ + doc/developer/building-doc.rst \ doc/developer/building-libunwind-note.rst \ doc/developer/building-libyang.rst \ doc/developer/building.rst \ diff --git a/doc/manpages/frr-zebra.rst b/doc/manpages/frr-zebra.rst index 6cc46b806d..356c128e30 100644 --- a/doc/manpages/frr-zebra.rst +++ b/doc/manpages/frr-zebra.rst @@ -38,6 +38,8 @@ OPTIONS available for the |DAEMON| command: Enable namespace VRF backend. By default, the VRF backend relies on VRF-lite support from the Linux kernel. This option permits discovering Linux named network namespaces and mapping it to FRR VRF contexts. + This option is deprecated. Please use the global -w option instead. + ROUTES ------ diff --git a/doc/user/_static/overrides.js b/doc/user/_static/overrides.js index 73bf6123b5..f6af539bfa 100644 --- a/doc/user/_static/overrides.js +++ b/doc/user/_static/overrides.js @@ -5,7 +5,7 @@ */ $(document).ready(function() { $("span.mark:contains('Y')" ).addClass("mark-y" ).parent("td").addClass("mark"); - $("span.mark:contains('≥')" ).addClass("mark-geq").parent("td").addClass("mark"); + $("span.mark:contains('>=')").addClass("mark-geq").parent("td").addClass("mark"); $("span.mark:contains('N')" ).addClass("mark-n" ).parent("td").addClass("mark"); $("span.mark:contains('CP')").addClass("mark-cp" ).parent("td").addClass("mark"); $("span.mark:contains('†')" ).addClass("mark-dag").parent("td").addClass("mark"); diff --git a/doc/user/about.rst b/doc/user/about.rst index e16ed7e3cc..d9470f5f3e 100644 --- a/doc/user/about.rst +++ b/doc/user/about.rst @@ -153,7 +153,7 @@ feature you're interested in, it should be supported on your platform. .. comment - the :mark:`X` pieces mesh with a little bit of JavaScript and CSS in _static/overrides.{js,css} respectively. The JS code looks at the - presence of the 'Y' 'N' '≥' '†' or 'CP' strings. This seemed to be the + presence of the 'Y' 'N' '>=' '†' or 'CP' strings. This seemed to be the best / least intrusive way of getting a nice table in HTML. The table will look somewhat shoddy on other sphinx targets like PDF or info (but should still be readable.) @@ -165,9 +165,9 @@ feature you're interested in, it should be supported on your platform. 
+-----------------------------------+----------------+--------------+------------+------------+ | `zebra` | :mark:`Y` | :mark:`Y` | :mark:`Y` | :mark:`Y` | +-----------------------------------+----------------+--------------+------------+------------+ -| VRF | :mark:`≥4.8` | :mark:`N` | :mark:`N` | :mark:`N` | +| VRF | :mark:`>=4.8` | :mark:`N` | :mark:`N` | :mark:`N` | +-----------------------------------+----------------+--------------+------------+------------+ -| MPLS | :mark:`≥4.5` | :mark:`Y` | :mark:`N` | :mark:`N` | +| MPLS | :mark:`>=4.5` | :mark:`Y` | :mark:`N` | :mark:`N` | +-----------------------------------+----------------+--------------+------------+------------+ | `pbrd` (Policy Routing) | :mark:`Y` | :mark:`N` | :mark:`N` | :mark:`N` | +-----------------------------------+----------------+--------------+------------+------------+ @@ -175,21 +175,21 @@ feature you're interested in, it should be supported on your platform. +-----------------------------------+----------------+--------------+------------+------------+ | `bgpd` (BGP) | :mark:`Y` | :mark:`Y` | :mark:`Y` | :mark:`Y` | +-----------------------------------+----------------+--------------+------------+------------+ -| VRF / L3VPN | :mark:`≥4.8` | :mark:`CP` | :mark:`CP` | :mark:`CP` | +| VRF / L3VPN | :mark:`>=4.8` | :mark:`CP` | :mark:`CP` | :mark:`CP` | | | :mark:`†4.3` | | | | +-----------------------------------+----------------+--------------+------------+------------+ -| EVPN | :mark:`≥4.18` | :mark:`CP` | :mark:`CP` | :mark:`CP` | +| EVPN | :mark:`>=4.18` | :mark:`CP` | :mark:`CP` | :mark:`CP` | | | :mark:`†4.9` | | | | +-----------------------------------+----------------+--------------+------------+------------+ | VNC (Virtual Network Control) | :mark:`CP` | :mark:`CP` | :mark:`CP` | :mark:`CP` | +-----------------------------------+----------------+--------------+------------+------------+ | Flowspec | :mark:`CP` | :mark:`CP` | :mark:`CP` | :mark:`CP` | +-----------------------------------+----------------+--------------+------------+------------+ -| `ldpd` (LDP) | :mark:`≥4.5` | :mark:`Y` | :mark:`N` | :mark:`N` | +| `ldpd` (LDP) | :mark:`>=4.5` | :mark:`Y` | :mark:`N` | :mark:`N` | +-----------------------------------+----------------+--------------+------------+------------+ -| VPWS / PW | :mark:`N` | :mark:`≥5.8` | :mark:`N` | :mark:`N` | +| VPWS / PW | :mark:`N` | :mark:`>=5.8`| :mark:`N` | :mark:`N` | +-----------------------------------+----------------+--------------+------------+------------+ -| VPLS | :mark:`N` | :mark:`≥5.8` | :mark:`N` | :mark:`N` | +| VPLS | :mark:`N` | :mark:`>=5.8`| :mark:`N` | :mark:`N` | +-----------------------------------+----------------+--------------+------------+------------+ | `nhrpd` (NHRP) | :mark:`Y` | :mark:`N` | :mark:`N` | :mark:`N` | +-----------------------------------+----------------+--------------+------------+------------+ @@ -197,7 +197,7 @@ feature you're interested in, it should be supported on your platform. 
+-----------------------------------+----------------+--------------+------------+------------+ | `ospfd` (OSPFv2) | :mark:`Y` | :mark:`Y` | :mark:`Y` | :mark:`Y` | +-----------------------------------+----------------+--------------+------------+------------+ -| Segment Routing | :mark:`≥4.12` | :mark:`N` | :mark:`N` | :mark:`N` | +| Segment Routing | :mark:`>=4.12` | :mark:`N` | :mark:`N` | :mark:`N` | +-----------------------------------+----------------+--------------+------------+------------+ | `ospf6d` (OSPFv3) | :mark:`Y` | :mark:`Y` | :mark:`Y` | :mark:`Y` | +-----------------------------------+----------------+--------------+------------+------------+ @@ -215,21 +215,21 @@ feature you're interested in, it should be supported on your platform. +-----------------------------------+----------------+--------------+------------+------------+ | **Multicast Routing** | | | | | +-----------------------------------+----------------+--------------+------------+------------+ -| `pimd` (PIM) | :mark:`≥4.19` | :mark:`N` | :mark:`Y` | :mark:`Y` | +| `pimd` (PIM) | :mark:`>=4.19` | :mark:`N` | :mark:`Y` | :mark:`Y` | +-----------------------------------+----------------+--------------+------------+------------+ | SSM (Source Specific) | :mark:`Y` | :mark:`N` | :mark:`Y` | :mark:`Y` | +-----------------------------------+----------------+--------------+------------+------------+ | ASM (Any Source) | :mark:`Y` | :mark:`N` | :mark:`N` | :mark:`N` | +-----------------------------------+----------------+--------------+------------+------------+ -| EVPN BUM Forwarding | :mark:`≥5.0` | :mark:`N` | :mark:`N` | :mark:`N` | +| EVPN BUM Forwarding | :mark:`>=5.0` | :mark:`N` | :mark:`N` | :mark:`N` | +-----------------------------------+----------------+--------------+------------+------------+ -| `vrrpd` (VRRP) | :mark:`≥5.1` | :mark:`N` | :mark:`N` | :mark:`N` | +| `vrrpd` (VRRP) | :mark:`>=5.1` | :mark:`N` | :mark:`N` | :mark:`N` | +-----------------------------------+----------------+--------------+------------+------------+ The indicators have the following semantics: * :mark:`Y` - daemon/feature fully functional -* :mark:`≥X.X` - fully functional with kernel version X.X or newer +* :mark:`>=X.X` - fully functional with kernel version X.X or newer * :mark:`†X.X` - restricted functionality or impaired performance with kernel version X.X or newer * :mark:`CP` - control plane only (i.e. BGP route server / route reflector) * :mark:`N` - daemon/feature not supported by operating system diff --git a/doc/user/basic.rst b/doc/user/basic.rst index 5fdd1887fa..b2d47a38eb 100644 --- a/doc/user/basic.rst +++ b/doc/user/basic.rst @@ -754,6 +754,17 @@ These options apply to all |PACKAGE_NAME| daemons. be added to all files that use the statedir. If you have "/var/run/frr" as the default statedir then it will become "/var/run/frr/<namespace>". +.. option:: -w, --vrfwnetns + + Enable namespace VRF backend. By default, the VRF backend relies on VRF-lite + support from the Linux kernel. This option permits discovering Linux named + network namespaces and mapping them to FRR VRF contexts. This option must be + the same for all running daemons. The easiest way to pass the same option to + all daemons is to use the ``frr_global_options`` variable in the + :ref:`Daemons Configuration File <daemons-configuration-file>`. + + .. seealso:: :ref:`zebra-vrf` + .. option:: -o, --vrfdefaultname <name> Set the name used for the *Default VRF* in CLI commands and YANG models. 
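A hedged aside on the ``frr_global_options`` mechanism that the doc/user/basic.rst hunk above recommends for the new global ``-w``/``--vrfwnetns`` flag: the sketch below shows one plausible way to set the flag once for every daemon via the daemons configuration file that this patch series installs as ``/etc/frr/daemons``. The snippet is illustrative only and is not part of this diff; the comments are assumptions about intent rather than quoted documentation.

```
# /etc/frr/daemons (excerpt) -- illustrative sketch, not part of this change set.
# Setting -w here passes the namespace VRF backend flag to all daemons at once,
# which keeps the option identical everywhere as basic.rst requires, instead of
# repeating it in each per-daemon *_options line (e.g. zebra_options).
frr_global_options="-w"
```

Because the option must be the same for all running daemons, carrying it in ``frr_global_options`` avoids the per-daemon drift that could arise from adding the flag to individual ``*_options`` lines one by one.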
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst index dafcac7c84..1493c2fb98 100644 --- a/doc/user/bgp.rst +++ b/doc/user/bgp.rst @@ -2693,6 +2693,12 @@ The following commands can be used in route maps: happen only when BGP updates have completely same communities value specified in the community list. +.. clicmd:: match community-limit (0-65535) + + This command matches BGP updates that use community list, and with a community + list count less or equal than the defined limit. Setting community-limit to 0 + will only match BGP updates with no community. + .. clicmd:: set community <none|COMMUNITY> additive This command sets the community value in BGP updates. If the attribute is @@ -4149,6 +4155,11 @@ Debugging Enable or disable debugging of communications between *bgpd* and *zebra*. +.. clicmd:: debug bgp aggregate [prefix <A.B.C.D/M|X:X::X:X/M>] + + Enable or disable debugging of route aggregation, either for one or more + aggregate addresses or for all aggregate addresses. + Dumping Messages and Routing Tables ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -4350,6 +4361,10 @@ displays IPv6 routing table. If ``detail`` option is specified after ``json``, more verbose JSON output will be displayed. +.. clicmd:: show bgp router [json] + + This command displays information related BGP router and Graceful Restart. + Some other commands provide additional options for filtering the output. .. clicmd:: show [ip] bgp regexp LINE diff --git a/doc/user/bmp.rst b/doc/user/bmp.rst index 14d0849b34..07c3c1c8bd 100644 --- a/doc/user/bmp.rst +++ b/doc/user/bmp.rst @@ -171,3 +171,8 @@ associated with a particular ``bmp targets``: All BGP neighbors are included in Route Mirroring. Options to select a subset of BGP sessions may be added in the future. + +.. clicmd:: bmp import-vrf-view VRF_OR_VIEW_NAME + + Perform Route Mirroring and Route Monitoring from an other BGP + instance. diff --git a/doc/user/pim.rst b/doc/user/pim.rst index ff45f21b56..c139e64880 100644 --- a/doc/user/pim.rst +++ b/doc/user/pim.rst @@ -217,7 +217,7 @@ PIM Routers never do SM over. This command is vrf aware, to configure for a vrf, specify the vrf in the router pim block. -.. clicmd:: rpf-lookup-mode MODE +.. clicmd:: rpf-lookup-mode MODE [group-list PREFIX_LIST] [source-list PREFIX_LIST] MODE sets the method used to perform RPF lookups. Supported modes: @@ -246,6 +246,18 @@ PIM Routers configured to make the configuration immune against possible changes in what the default behavior is. + If a group and/or source prefix list is provided, then the RPF lookup mode + will only apply to source, group addresses that match the given prefix list(s). + Not all RPF lookups have a valid group address when performing a lookup, e.g. RPF + to an RP only does a lookup to the RP address and has no specific group. + Lookups that do not have a specific group will only use lookup modes that do not + specify a group-list. + A global rpf lookup mode that does not have a group or source list is always installed + and, as documented above, uses the ``mrib-then-urib`` mode by default. + This can be changed with an rpf-lookup-mode MODE that does not specify group or source lists. + There can be any number of rpf lookup modes, as long as the combination of group and source + list is unique. + .. 
warning:: Unreachable routes do not receive special treatment and do not cause diff --git a/doc/user/static.rst b/doc/user/static.rst index 922c71a073..5bf5004a66 100644 --- a/doc/user/static.rst +++ b/doc/user/static.rst @@ -176,3 +176,52 @@ multiple segments instructions. router# show ipv6 route [..] S>* 2005::/64 [1/0] is directly connected, ens3, seg6 2001:db8:aaaa::7,2002::4,2002::3,2002::2, weight 1, 00:00:06 + +SRv6 Static SIDs Commands +========================= + +.. clicmd:: segment-routing + + Move from configure mode to segment-routing node. + +.. clicmd:: srv6 + + Move from segment-routing node to srv6 node. + +.. clicmd:: static-sids + + Move from srv6 node to static-sids node. In this static-sids node, user can + configure static SRv6 SIDs. + +.. clicmd:: sid X:X::X:X/M locator NAME behavior <uN|uDT4|uDT6|uDT46> [vrf VRF] + + Specify the locator sid manually. Configuring a local sid in a purely static mode + by specifying the sid value would generate a unique SID. + This feature will support the configuration of static SRv6 decapsulation on the system. + + It supports four parameter options, corresponding to the following functions: + uN, uDT4, uDT6, uDT46 + + When configuring the local sid, if the action is set to 'uN', no vrf should be set. + While for any other action, it is necessary to specify a specific vrf. + +:: + + router# configure terminal + router(config)# segment-routing + router(config-sr)# srv6 + router(config-srv6)# static-sids + router(config-srv6-sids)# sid fcbb:bbbb:1:fe01::/64 locator LOC1 behavior uDT6 vrf Vrf1 + router(config-srv6-sids)# sid fcbb:bbbb:1:fe02::/64 locator LOC1 behavior uDT4 vrf Vrf1 + router(config-srv6-sids)# sid fcbb:bbbb:1:fe03::/64 locator LOC1 behavior uDT46 vrf Vrf2 + + router(config-srv6-locator)# show run + ... + segment-routing + srv6 + static-sids + sid fcbb:bbbb:1:fe01::/64 locator LOC1 behavior uDT6 vrf Vrf1 + sid fcbb:bbbb:1:fe02::/64 locator LOC1 behavior uDT4 vrf Vrf1 + sid fcbb:bbbb:1:fe03::/64 locator LOC1 behavior uDT46 vrf Vrf2 + ! + ...
\ No newline at end of file diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst index ac29b1c7d4..ef3a619853 100644 --- a/doc/user/zebra.rst +++ b/doc/user/zebra.rst @@ -53,6 +53,8 @@ Besides the common invocation options (:ref:`common-invocation-options`), the VRF defined by *Zebra*, as usual. If this option is specified when running *Zebra*, one must also specify the same option for *mgmtd*. + This options is deprecated. Please use the global -w option instead. + .. seealso:: :ref:`zebra-vrf` .. option:: -z <path_to_socket>, --socket <path_to_socket> diff --git a/docker/ubuntu-ci/Dockerfile b/docker/ubuntu-ci/Dockerfile index aaad3bc172..0bfcb51878 100644 --- a/docker/ubuntu-ci/Dockerfile +++ b/docker/ubuntu-ci/Dockerfile @@ -2,7 +2,6 @@ ARG UBUNTU_VERSION=22.04 FROM ubuntu:$UBUNTU_VERSION ARG DEBIAN_FRONTEND=noninteractive -ENV APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn # Update and install build requirements. RUN apt update && apt upgrade -y && \ @@ -77,14 +76,15 @@ RUN apt update && apt upgrade -y && \ wget https://raw.githubusercontent.com/FRRouting/frr-mibs/main/iana/IANA-IPPM-METRICS-REGISTRY-MIB -O /usr/share/snmp/mibs/iana/IANA-IPPM-METRICS-REGISTRY-MIB && \ wget https://raw.githubusercontent.com/FRRouting/frr-mibs/main/ietf/SNMPv2-PDU -O /usr/share/snmp/mibs/ietf/SNMPv2-PDU && \ wget https://raw.githubusercontent.com/FRRouting/frr-mibs/main/ietf/IPATM-IPMC-MIB -O /usr/share/snmp/mibs/ietf/IPATM-IPMC-MIB && \ + rm -f /usr/lib/python3.*/EXTERNALLY-MANAGED && \ python3 -m pip install wheel && \ - python3 -m pip install 'protobuf<4' grpcio grpcio-tools && \ + python3 -m pip install protobuf grpcio grpcio-tools && \ python3 -m pip install 'pytest>=6.2.4' 'pytest-xdist>=2.3.0' && \ python3 -m pip install 'scapy>=2.4.5' && \ python3 -m pip install xmltodict && \ python3 -m pip install git+https://github.com/Exa-Networks/exabgp@0659057837cd6c6351579e9f0fa47e9fb7de7311 -ARG UID=1000 +ARG UID=1010 RUN groupadd -r -g 92 frr && \ groupadd -r -g 85 frrvty && \ adduser --system --ingroup frr --home /home/frr \ diff --git a/docker/ubuntu22-ci/README.md b/docker/ubuntu22-ci/README.md index 617192eb71..116b3c0e4a 100644 --- a/docker/ubuntu22-ci/README.md +++ b/docker/ubuntu22-ci/README.md @@ -5,7 +5,7 @@ This builds an ubuntu 22.04 container for dev / test # Build ``` -docker build -t frr-ubuntu22:latest -f docker/ubuntu-ci/Dockerfile . +docker build -t frr-ubuntu22:latest --build-arg=UBUNTU_VERSION=22.04 -f docker/ubuntu-ci/Dockerfile . ``` # Run diff --git a/docker/ubuntu24-ci/README.md b/docker/ubuntu24-ci/README.md new file mode 100644 index 0000000000..38ba0ee171 --- /dev/null +++ b/docker/ubuntu24-ci/README.md @@ -0,0 +1,66 @@ +# Ubuntu 24.04 + +This builds an ubuntu 24.04 container for dev / test + +# Build + +``` +docker build -t frr-ubuntu24:latest --build-arg=UBUNTU_VERSION=24.04 -f docker/ubuntu-ci/Dockerfile . 
+``` + +# Run + +``` +docker run -d --init --privileged --name frr-ubuntu24 --mount type=bind,source=/lib/modules,target=/lib/modules frr-ubuntu24:latest +``` + +# Running full topotest (container stops at end) + +``` +docker run --init -it --privileged --name frr-ubuntu24 \ + -v /lib/modules:/lib/modules frr-ubuntu24:latest \ + bash -c 'cd /home/frr/frr/tests/topotests; sudo pytest -nauto --dist=loadfile' +``` + +# Extract results from the above run into `run-results` dir and analyze + +``` +tests/topotests/analyze.py -C frr-ubuntu24 -Ar run-results +``` + +# Extract coverage from a stopped container into host FRR source tree + +``` +docker export frr-ubuntu24 | tar --strip=3 --wildcards -vx '*.gc??' +lcov -b $(pwd) --capture --directory . --output-file=coverage.info +``` + +# make check + +``` +docker exec frr-ubuntu24 bash -c 'cd ~/frr ; make check' +``` + +# interactive bash + +``` +docker exec -it frr-ubuntu24 bash +``` + +# Run a specific topotest + +``` +docker exec frr-ubuntu24 bash -c 'cd ~/frr/tests/topotests ; sudo pytest ospf_topo1/test_ospf_topo1.py' +``` + +# stop & remove container + +``` +docker stop frr-ubuntu24 ; docker rm frr-ubuntu24 +``` + +# remove image + +``` +docker rmi frr-ubuntu24:latest +``` diff --git a/isisd/isis_cli.c b/isisd/isis_cli.c index 652efee89a..735e39a377 100644 --- a/isisd/isis_cli.c +++ b/isisd/isis_cli.c @@ -626,10 +626,17 @@ DEFPY_YANG(domain_passwd, domain_passwd_cmd, } DEFPY_YANG(no_area_passwd, no_area_passwd_cmd, - "no <area-password|domain-password>$cmd", + "no <area-password|domain-password>$cmd [<clear|md5>$pwd_type WORD$pwd [authenticate snp <send-only|validate>$snp]]", NO_STR "Configure the authentication password for an area\n" - "Set the authentication password for a routing domain\n") + "Set the authentication password for a routing domain\n" + "Clear-text authentication type\n" + "MD5 authentication type\n" + "Level-wide password\n" + "Authentication\n" + "SNP PDUs\n" + "Send but do not check PDUs on receiving\n" + "Send and check PDUs on receiving\n") { nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL); diff --git a/isisd/isis_srv6.c b/isisd/isis_srv6.c index 2348bd043a..4b97b5372e 100644 --- a/isisd/isis_srv6.c +++ b/isisd/isis_srv6.c @@ -698,7 +698,7 @@ void isis_srv6_area_init(struct isis_area *area) srv6db->config.max_end_pop_msd = ISIS_DEFAULT_SRV6_MAX_END_POP_MSD; srv6db->config.max_h_encaps_msd = ISIS_DEFAULT_SRV6_MAX_H_ENCAPS_MSD; srv6db->config.max_end_d_msd = ISIS_DEFAULT_SRV6_MAX_END_D_MSD; - strlcpy(srv6db->config.srv6_ifname, ISIS_DEFAULT_SRV6_IFNAME, sizeof(srv6db->config.srv6_ifname)); + strlcpy(srv6db->config.srv6_ifname, DEFAULT_SRV6_IFNAME, sizeof(srv6db->config.srv6_ifname)); #endif /* Initialize SRv6 Locator chunks list */ diff --git a/isisd/isis_srv6.h b/isisd/isis_srv6.h index bde14965f6..eeb76c0b86 100644 --- a/isisd/isis_srv6.h +++ b/isisd/isis_srv6.h @@ -16,8 +16,7 @@ #define ISIS_DEFAULT_SRV6_MAX_SEG_LEFT_MSD 3 #define ISIS_DEFAULT_SRV6_MAX_END_POP_MSD 3 #define ISIS_DEFAULT_SRV6_MAX_H_ENCAPS_MSD 2 -#define ISIS_DEFAULT_SRV6_MAX_END_D_MSD 5 -#define ISIS_DEFAULT_SRV6_IFNAME "sr0" +#define ISIS_DEFAULT_SRV6_MAX_END_D_MSD 5 /* SRv6 SID structure */ struct isis_srv6_sid_structure { diff --git a/isisd/isis_tlvs.c b/isisd/isis_tlvs.c index 18b13547a5..8c97dcda2f 100644 --- a/isisd/isis_tlvs.c +++ b/isisd/isis_tlvs.c @@ -1053,9 +1053,8 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, adj->algorithm)); json_object_int_add(srv6_endx_sid_json, "weight", adj->weight); - 
json_object_string_add(srv6_endx_sid_json, - "behavior", - seg6local_action2str( + json_object_string_add(srv6_endx_sid_json, "behavior", + srv6_endpoint_behavior_codepoint2str( adj->behavior)); json_object_boolean_add( srv6_endx_sid_json, "flagB", @@ -1081,22 +1080,17 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, for (adj = (struct isis_srv6_endx_sid_subtlv *) exts->srv6_endx_sid.head; adj; adj = adj->next) { - sbuf_push( - buf, indent, - "SRv6 End.X SID: %pI6, Algorithm: %s, Weight: %hhu, Endpoint Behavior: %s, Flags: B:%c, S:%c, P:%c\n", - &adj->sid, - sr_algorithm_string(adj->algorithm), - adj->weight, - seg6local_action2str(adj->behavior), - adj->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_BFLG - ? '1' - : '0', - adj->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_SFLG - ? '1' - : '0', - adj->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_PFLG - ? '1' - : '0'); + sbuf_push(buf, indent, + "SRv6 End.X SID: %pI6, Algorithm: %s, Weight: %hhu, Endpoint Behavior: %s, Flags: B:%c, S:%c, P:%c\n", + &adj->sid, sr_algorithm_string(adj->algorithm), + adj->weight, + srv6_endpoint_behavior_codepoint2str(adj->behavior), + adj->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_BFLG ? '1' + : '0', + adj->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_SFLG ? '1' + : '0', + adj->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_PFLG ? '1' + : '0'); if (adj->subsubtlvs) isis_format_subsubtlvs(adj->subsubtlvs, buf, NULL, @@ -1131,9 +1125,8 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, lan->algorithm)); json_object_int_add(srv6_lan_endx_sid_json, "weight", lan->weight); - json_object_string_add(srv6_lan_endx_sid_json, - "behavior", - seg6local_action2str( + json_object_string_add(srv6_lan_endx_sid_json, "behavior", + srv6_endpoint_behavior_codepoint2str( lan->behavior)); json_object_boolean_add( srv6_lan_endx_sid_json, "flagB", @@ -1162,24 +1155,19 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, for (lan = (struct isis_srv6_lan_endx_sid_subtlv *) exts->srv6_lan_endx_sid.head; lan; lan = lan->next) { - sbuf_push( - buf, indent, - "SRv6 Lan End.X SID: %pI6, Algorithm: %s, Weight: %hhu, Endpoint Behavior: %s, Flags: B:%c, S:%c, P:%c " - "Neighbor-ID: %pSY\n", - &lan->sid, - sr_algorithm_string(lan->algorithm), - lan->weight, - seg6local_action2str(lan->behavior), - lan->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_BFLG - ? '1' - : '0', - lan->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_SFLG - ? '1' - : '0', - lan->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_PFLG - ? '1' - : '0', - lan->neighbor_id); + sbuf_push(buf, indent, + "SRv6 Lan End.X SID: %pI6, Algorithm: %s, Weight: %hhu, Endpoint Behavior: %s, Flags: B:%c, S:%c, P:%c " + "Neighbor-ID: %pSY\n", + &lan->sid, sr_algorithm_string(lan->algorithm), + lan->weight, + srv6_endpoint_behavior_codepoint2str(lan->behavior), + lan->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_BFLG ? '1' + : '0', + lan->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_SFLG ? '1' + : '0', + lan->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_PFLG ? 
'1' + : '0', + lan->neighbor_id); if (lan->subsubtlvs) isis_format_subsubtlvs(lan->subsubtlvs, buf, NULL, @@ -2823,7 +2811,7 @@ static void format_item_srv6_end_sid(uint16_t mtid, struct isis_item *i, sid_json = json_object_new_object(); json_object_object_add(json, "srv6EndSid", sid_json); json_object_string_add(sid_json, "endpointBehavior", - seg6local_action2str(sid->behavior)); + srv6_endpoint_behavior_codepoint2str(sid->behavior)); json_object_string_addf(sid_json, "sidValue", "%pI6", &sid->sid); if (sid->subsubtlvs) { struct json_object *subtlvs_json; @@ -2836,7 +2824,7 @@ static void format_item_srv6_end_sid(uint16_t mtid, struct isis_item *i, } else { sbuf_push(buf, indent, "SRv6 End SID "); sbuf_push(buf, 0, "Endpoint Behavior: %s, ", - seg6local_action2str(sid->behavior)); + srv6_endpoint_behavior_codepoint2str(sid->behavior)); sbuf_push(buf, 0, "SID value: %pI6\n", &sid->sid); if (sid->subsubtlvs) { diff --git a/isisd/isis_tlvs.h b/isisd/isis_tlvs.h index c64bbf7f69..5798d318f2 100644 --- a/isisd/isis_tlvs.h +++ b/isisd/isis_tlvs.h @@ -777,12 +777,6 @@ struct list *isis_fragment_tlvs(struct isis_tlvs *tlvs, size_t size); #define ISIS_MT_AT_MASK 0x4000 #endif -/* RFC 8919 */ -#define ISIS_SABM_FLAG_R 0x80 /* RSVP-TE */ -#define ISIS_SABM_FLAG_S 0x40 /* Segment Routing Policy */ -#define ISIS_SABM_FLAG_L 0x20 /* Loop-Free Alternate */ -#define ISIS_SABM_FLAG_X 0x10 /* Flex-Algorithm - RFC9350 */ - void isis_tlvs_add_auth(struct isis_tlvs *tlvs, struct isis_passwd *passwd); void isis_tlvs_add_area_addresses(struct isis_tlvs *tlvs, struct list *addresses); diff --git a/lib/command.h b/lib/command.h index c60751789f..dfd732893b 100644 --- a/lib/command.h +++ b/lib/command.h @@ -154,6 +154,7 @@ enum node_type { PCEP_PCE_NODE, /* PCE configuration node */ PCEP_PCC_NODE, /* PCC configuration node */ SRV6_NODE, /* SRv6 node */ + SRV6_SIDS_NODE, /* SRv6 SIDs node */ SRV6_LOCS_NODE, /* SRv6 locators node */ SRV6_LOC_NODE, /* SRv6 locator node */ SRV6_ENCAP_NODE, /* SRv6 encapsulation node */ diff --git a/lib/darr.h b/lib/darr.h index 121e3dd14e..084c2a103a 100644 --- a/lib/darr.h +++ b/lib/darr.h @@ -571,16 +571,16 @@ void *__darr_resize(void *a, uint count, size_t esize, struct memtype *mt); * Return: * The dynamic_array D with the new string content. 
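 *
 * Example (illustrative sketch; `str` is a plain darr string built from NULL):
 *
 *	char *str = NULL;
 *
 *	darr_in_strcat(str, "foo");
 *	darr_in_strcat(str, "bar");	// str == "foobar"
 *	darr_free(str);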
*/ -#define darr_in_strcat(D, S) \ - ({ \ - uint __dlen = darr_strlen(D); \ - uint __slen = strlen(S); \ - darr_ensure_cap_mt(D, __dlen + __slen + 1, MTYPE_DARR_STR); \ - if (darr_len(D) == 0) \ - *darr_append(D) = 0; \ - memcpy(darr_last(D), (S), __slen + 1); \ - _darr_len(D) += __slen; \ - D; \ +#define darr_in_strcat(D, S) \ + ({ \ + uint __dlen = darr_strlen(D); \ + uint __slen = strlen(S); \ + darr_ensure_cap_mt(D, __dlen + __slen + 1, MTYPE_DARR_STR); \ + if (darr_len(D) == 0) \ + *darr_append(D) = 0; \ + memcpy(&(D)[darr_strlen(D)] /* darr_last(D) clangSA :( */, (S), __slen + 1); \ + _darr_len(D) += __slen; \ + D; \ }) /** diff --git a/lib/event.c b/lib/event.c index cfe8c3adc0..d95b3021a7 100644 --- a/lib/event.c +++ b/lib/event.c @@ -429,9 +429,6 @@ DEFUN_NOSH (show_event_poll, return CMD_SUCCESS; } -#if CONFDATE > 20241231 -CPP_NOTICE("Remove `clear thread cpu` command") -#endif DEFUN (clear_event_cpu, clear_event_cpu_cmd, "clear event cpu [FILTER]", @@ -457,14 +454,6 @@ DEFUN (clear_event_cpu, return CMD_SUCCESS; } -ALIAS (clear_event_cpu, - clear_thread_cpu_cmd, - "clear thread cpu [FILTER]", - "Clear stored data in all pthreads\n" - "Thread information\n" - "Thread CPU usage\n" - "Display filter (rwtexb)\n") - static void show_event_timers_helper(struct vty *vty, struct event_loop *m) { const char *name = m->name ? m->name : "main"; @@ -504,7 +493,6 @@ void event_cmd_init(void) { install_element(VIEW_NODE, &show_event_cpu_cmd); install_element(VIEW_NODE, &show_event_poll_cmd); - install_element(ENABLE_NODE, &clear_thread_cpu_cmd); install_element(ENABLE_NODE, &clear_event_cpu_cmd); install_element(CONFIG_NODE, &service_cputime_stats_cmd); diff --git a/lib/frrscript.c b/lib/frrscript.c index 06460b014d..8b068ba61b 100644 --- a/lib/frrscript.c +++ b/lib/frrscript.c @@ -248,10 +248,12 @@ int _frrscript_call_lua(struct lua_function_state *lfs, int nargs) zlog_err("Lua hook call '%s' : error handler error: %s", lfs->name, lua_tostring(lfs->L, -1)); break; +#if defined(LUA_VERSION_NUM) && LUA_VERSION_NUM <= 503 case LUA_ERRGCMM: zlog_err("Lua hook call '%s' : garbage collector error: %s", lfs->name, lua_tostring(lfs->L, -1)); break; +#endif default: zlog_err("Lua hook call '%s' : unknown error: %s", lfs->name, lua_tostring(lfs->L, -1)); @@ -29,6 +29,10 @@ #include "admin_group.h" #include "lib/if_clippy.c" + +/* Set by the owner (zebra). 
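+ * When true, the if_update_state_*() helpers below publish each changed
+ * field (mtu, mtu6, metric, speed, ...) into the interface's operational
+ * state tree via nb_op_updatef().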
*/ +bool if_notify_oper_changes; + DEFINE_MTYPE_STATIC(LIB, IF, "Interface"); DEFINE_MTYPE_STATIC(LIB, IFDESC, "Intf Desc"); DEFINE_MTYPE_STATIC(LIB, CONNECTED, "Connected"); @@ -208,6 +212,104 @@ void if_down_via_zapi(struct interface *ifp) hook_call(if_down, ifp); } +void if_update_state_metric(struct interface *ifp, uint32_t metric) +{ + if (ifp->metric == metric) + return; + ifp->metric = metric; + if (ifp->state && if_notify_oper_changes) + nb_op_updatef(ifp->state, "metric", "%u", ifp->metric); +} + +void if_update_state_mtu(struct interface *ifp, uint mtu) +{ + if (ifp->mtu == mtu) + return; + ifp->mtu = mtu; + if (ifp->state && if_notify_oper_changes) + nb_op_updatef(ifp->state, "mtu", "%u", ifp->mtu); +} + +void if_update_state_mtu6(struct interface *ifp, uint mtu) +{ + if (ifp->mtu6 == mtu) + return; + ifp->mtu6 = mtu; + if (ifp->state && if_notify_oper_changes) + nb_op_updatef(ifp->state, "mtu6", "%u", ifp->mtu); +} + +void if_update_state_hw_addr(struct interface *ifp, const uint8_t *hw_addr, uint len) +{ + if (len == (uint)ifp->hw_addr_len && (len == 0 || !memcmp(hw_addr, ifp->hw_addr, len))) + return; + memcpy(ifp->hw_addr, hw_addr, len); + ifp->hw_addr_len = len; + if (ifp->state && if_notify_oper_changes) + nb_op_updatef(ifp->state, "phy-address", "%pEA", (struct ethaddr *)ifp->hw_addr); +} + +void if_update_state_speed(struct interface *ifp, uint32_t speed) +{ + if (ifp->speed == speed) + return; + ifp->speed = speed; + if (ifp->state && if_notify_oper_changes) + nb_op_updatef(ifp->state, "speed", "%u", ifp->speed); +} + +void if_update_state(struct interface *ifp) +{ + struct lyd_node *state = ifp->state; + + if (!state || !if_notify_oper_changes) + return; + + /* + * Remove top level container update when we have patch support, for now + * this keeps us from generating 6 separate REPLACE messages though. 
+ */ + // nb_op_update(state, ".", NULL); + nb_op_updatef(state, "if-index", "%d", ifp->ifindex); + nb_op_updatef(state, "mtu", "%u", ifp->mtu); + nb_op_updatef(state, "mtu6", "%u", ifp->mtu); + nb_op_updatef(state, "speed", "%u", ifp->speed); + nb_op_updatef(state, "metric", "%u", ifp->metric); + nb_op_updatef(state, "phy-address", "%pEA", (struct ethaddr *)ifp->hw_addr); +} + +static void if_update_state_remove(struct interface *ifp) +{ + if (!if_notify_oper_changes || ifp->name[0] == 0) + return; + + if (vrf_is_backend_netns()) + nb_op_update_delete_pathf(NULL, "/frr-interface:lib/interface[name=\"%s:%s\"]/state", + ifp->vrf->name, ifp->name); + else + nb_op_update_delete_pathf(NULL, "/frr-interface:lib/interface[name=\"%s\"]/state", + ifp->name); + if (ifp->state) { + lyd_free_all(ifp->state); + ifp->state = NULL; + } +} + +static void if_update_state_add(struct interface *ifp) +{ + if (!if_notify_oper_changes || ifp->name[0] == 0) + return; + + if (vrf_is_backend_netns()) + ifp->state = nb_op_update_pathf(NULL, + "/frr-interface:lib/interface[name=\"%s:%s\"]/state", + NULL, ifp->vrf->name, ifp->name); + else + ifp->state = nb_op_update_pathf(NULL, + "/frr-interface:lib/interface[name=\"%s\"]/state", + NULL, ifp->name); +} + static struct interface *if_create_name(const char *name, struct vrf *vrf) { struct interface *ifp; @@ -216,7 +318,11 @@ static struct interface *if_create_name(const char *name, struct vrf *vrf) if_set_name(ifp, name); + if (if_notify_oper_changes && ifp->state) + if_update_state(ifp); + hook_call(if_add, ifp); + return ifp; } @@ -228,8 +334,10 @@ void if_update_to_new_vrf(struct interface *ifp, vrf_id_t vrf_id) /* remove interface from old master vrf list */ old_vrf = ifp->vrf; - if (ifp->name[0] != '\0') + if (ifp->name[0] != '\0') { IFNAME_RB_REMOVE(old_vrf, ifp); + if_update_state_remove(ifp); + } if (ifp->ifindex != IFINDEX_INTERNAL) IFINDEX_RB_REMOVE(old_vrf, ifp); @@ -237,8 +345,11 @@ void if_update_to_new_vrf(struct interface *ifp, vrf_id_t vrf_id) vrf = vrf_get(vrf_id, NULL); ifp->vrf = vrf; - if (ifp->name[0] != '\0') + if (ifp->name[0] != '\0') { IFNAME_RB_INSERT(vrf, ifp); + if_update_state_add(ifp); + if_update_state(ifp); + } if (ifp->ifindex != IFINDEX_INTERNAL) IFINDEX_RB_INSERT(vrf, ifp); @@ -280,6 +391,8 @@ void if_delete(struct interface **ifp) XFREE(MTYPE_IFDESC, ptr->desc); + if_update_state_remove(ptr); + XFREE(MTYPE_IF, ptr); *ifp = NULL; } @@ -303,7 +416,6 @@ static struct interface *if_lookup_by_ifindex(ifindex_t ifindex, struct interface *if_lookup_by_index(ifindex_t ifindex, vrf_id_t vrf_id) { switch (vrf_get_backend()) { - case VRF_BACKEND_UNKNOWN: case VRF_BACKEND_NETNS: return(if_lookup_by_ifindex(ifindex, vrf_id)); case VRF_BACKEND_VRF_LITE: @@ -573,7 +685,6 @@ struct interface *if_get_by_name(const char *name, vrf_id_t vrf_id, struct vrf *vrf; switch (vrf_get_backend()) { - case VRF_BACKEND_UNKNOWN: case VRF_BACKEND_NETNS: vrf = vrf_get(vrf_id, vrf_name); assert(vrf); @@ -630,6 +741,9 @@ int if_set_index(struct interface *ifp, ifindex_t ifindex) ifp->ifindex = ifindex; + if (if_notify_oper_changes) + nb_op_updatef(ifp->state, "if-index", "%d", ifp->ifindex); + if (ifp->ifindex != IFINDEX_INTERNAL) { /* * This should never happen, since we checked if there was @@ -648,13 +762,17 @@ static void if_set_name(struct interface *ifp, const char *name) if (if_cmp_name_func(ifp->name, name) == 0) return; - if (ifp->name[0] != '\0') + if (ifp->name[0] != '\0') { IFNAME_RB_REMOVE(ifp->vrf, ifp); + if_update_state_remove(ifp); + } 
strlcpy(ifp->name, name, sizeof(ifp->name)); - if (ifp->name[0] != '\0') + if (ifp->name[0] != '\0') { IFNAME_RB_INSERT(ifp->vrf, ifp); + if_update_state_add(ifp); + } } /* Does interface up ? */ @@ -858,47 +976,6 @@ struct nbr_connected *nbr_connected_check(struct interface *ifp, return NULL; } -/* Print if_addr structure. */ -static void __attribute__((unused)) -connected_log(struct connected *connected, char *str) -{ - struct prefix *p; - struct interface *ifp; - char logbuf[BUFSIZ]; - char buf[BUFSIZ]; - - ifp = connected->ifp; - p = connected->address; - - snprintf(logbuf, sizeof(logbuf), "%s interface %s vrf %s(%u) %s %pFX ", - str, ifp->name, ifp->vrf->name, ifp->vrf->vrf_id, - prefix_family_str(p), p); - - p = connected->destination; - if (p) { - strlcat(logbuf, inet_ntop(p->family, &p->u.prefix, buf, BUFSIZ), - BUFSIZ); - } - zlog_info("%s", logbuf); -} - -/* Print if_addr structure. */ -static void __attribute__((unused)) -nbr_connected_log(struct nbr_connected *connected, char *str) -{ - struct prefix *p; - struct interface *ifp; - char logbuf[BUFSIZ]; - - ifp = connected->ifp; - p = connected->address; - - snprintf(logbuf, sizeof(logbuf), "%s interface %s %s %pFX ", str, - ifp->name, prefix_family_str(p), p); - - zlog_info("%s", logbuf); -} - /* count the number of connected addresses that are in the given family */ unsigned int connected_count_by_family(struct interface *ifp, int family) { @@ -1649,90 +1726,90 @@ static int lib_interface_description_destroy(struct nb_cb_destroy_args *args) return NB_OK; } -/* - * XPath: /frr-interface:lib/interface/vrf - */ -static struct yang_data * -lib_interface_vrf_get_elem(struct nb_cb_get_elem_args *args) +static enum nb_error __return_ok(const struct nb_node *nb_node, const void *list_entry, + struct lyd_node *parent) { - const struct interface *ifp = args->list_entry; - - return yang_data_new_string(args->xpath, ifp->vrf->name); + return NB_OK; } /* - * XPath: /frr-interface:lib/interface/state/if-index + * XPath: /frr-interface:lib/interface/vrf */ -static struct yang_data * -lib_interface_state_if_index_get_elem(struct nb_cb_get_elem_args *args) +static enum nb_error lib_interface_vrf_get(const struct nb_node *nb_node, const void *list_entry, + struct lyd_node *parent) { - const struct interface *ifp = args->list_entry; + const struct lysc_node *snode = nb_node->snode; + const struct interface *ifp = list_entry; - return yang_data_new_int32(args->xpath, ifp->ifindex); + if (lyd_new_term(parent, snode->module, snode->name, ifp->vrf->name, LYD_NEW_PATH_UPDATE, + NULL)) + return NB_ERR_RESOURCE; + return NB_OK; } /* - * XPath: /frr-interface:lib/interface/state/mtu + * XPath: /frr-interface:lib/interface/state/if-index */ -static struct yang_data * -lib_interface_state_mtu_get_elem(struct nb_cb_get_elem_args *args) +static enum nb_error lib_interface_state_if_index_get(const struct nb_node *nb_node, + const void *list_entry, + struct lyd_node *parent) { - const struct interface *ifp = args->list_entry; + const struct lysc_node *snode = nb_node->snode; + const struct interface *ifp = list_entry; + int32_t value = ifp->ifindex; - return yang_data_new_uint32(args->xpath, ifp->mtu); + if (lyd_new_term_bin(parent, snode->module, snode->name, &value, sizeof(value), + LYD_NEW_PATH_UPDATE, NULL)) + return NB_ERR_RESOURCE; + return NB_OK; } /* - * XPath: /frr-interface:lib/interface/state/mtu6 + * XPath: /frr-interface:lib/interface/state/mtu[6] */ -static struct yang_data * -lib_interface_state_mtu6_get_elem(struct nb_cb_get_elem_args *args) 
+static enum nb_error lib_interface_state_mtu_get(const struct nb_node *nb_node, + const void *list_entry, struct lyd_node *parent) { - const struct interface *ifp = args->list_entry; + const struct lysc_node *snode = nb_node->snode; + const struct interface *ifp = list_entry; + uint32_t value = ifp->mtu; - return yang_data_new_uint32(args->xpath, ifp->mtu6); + if (lyd_new_term_bin(parent, snode->module, snode->name, &value, sizeof(value), + LYD_NEW_PATH_UPDATE, NULL)) + return NB_ERR_RESOURCE; + return NB_OK; } /* * XPath: /frr-interface:lib/interface/state/speed */ -static struct yang_data * -lib_interface_state_speed_get_elem(struct nb_cb_get_elem_args *args) +static enum nb_error lib_interface_state_speed_get(const struct nb_node *nb_node, + const void *list_entry, struct lyd_node *parent) { - const struct interface *ifp = args->list_entry; + const struct lysc_node *snode = nb_node->snode; + const struct interface *ifp = list_entry; + uint32_t value = ifp->speed; - return yang_data_new_uint32(args->xpath, ifp->speed); + if (lyd_new_term_bin(parent, snode->module, snode->name, &value, sizeof(value), + LYD_NEW_PATH_UPDATE, NULL)) + return NB_ERR_RESOURCE; + return NB_OK; } /* * XPath: /frr-interface:lib/interface/state/metric */ -static struct yang_data * -lib_interface_state_metric_get_elem(struct nb_cb_get_elem_args *args) +static enum nb_error lib_interface_state_metric_get(const struct nb_node *nb_node, + const void *list_entry, struct lyd_node *parent) { - const struct interface *ifp = args->list_entry; - - return yang_data_new_uint32(args->xpath, ifp->metric); -} + const struct lysc_node *snode = nb_node->snode; + const struct interface *ifp = list_entry; + uint32_t value = ifp->metric; -/* - * XPath: /frr-interface:lib/interface/state/flags - */ -static struct yang_data * -lib_interface_state_flags_get_elem(struct nb_cb_get_elem_args *args) -{ - /* TODO: implement me. */ - return NULL; -} - -/* - * XPath: /frr-interface:lib/interface/state/type - */ -static struct yang_data * -lib_interface_state_type_get_elem(struct nb_cb_get_elem_args *args) -{ - /* TODO: implement me. 
*/ - return NULL; + if (lyd_new_term_bin(parent, snode->module, snode->name, &value, sizeof(value), + LYD_NEW_PATH_UPDATE, NULL)) + return NB_ERR_RESOURCE; + return NB_OK; } /* @@ -1779,49 +1856,49 @@ const struct frr_yang_module_info frr_interface_info = { { .xpath = "/frr-interface:lib/interface/vrf", .cbs = { - .get_elem = lib_interface_vrf_get_elem, + .get = lib_interface_vrf_get, } }, { .xpath = "/frr-interface:lib/interface/state/if-index", .cbs = { - .get_elem = lib_interface_state_if_index_get_elem, + .get = lib_interface_state_if_index_get, } }, { .xpath = "/frr-interface:lib/interface/state/mtu", .cbs = { - .get_elem = lib_interface_state_mtu_get_elem, + .get = lib_interface_state_mtu_get, } }, { .xpath = "/frr-interface:lib/interface/state/mtu6", .cbs = { - .get_elem = lib_interface_state_mtu6_get_elem, + .get = lib_interface_state_mtu_get, } }, { .xpath = "/frr-interface:lib/interface/state/speed", .cbs = { - .get_elem = lib_interface_state_speed_get_elem, + .get = lib_interface_state_speed_get, } }, { .xpath = "/frr-interface:lib/interface/state/metric", .cbs = { - .get_elem = lib_interface_state_metric_get_elem, + .get = lib_interface_state_metric_get, } }, { .xpath = "/frr-interface:lib/interface/state/flags", .cbs = { - .get_elem = lib_interface_state_flags_get_elem, + .get = __return_ok, } }, { .xpath = "/frr-interface:lib/interface/state/type", .cbs = { - .get_elem = lib_interface_state_type_get_elem, + .get = __return_ok, } }, { @@ -297,6 +297,8 @@ struct interface { struct vrf *vrf; + struct lyd_node *state; + /* * Has the end users entered `interface XXXX` from the cli in some * fashion? @@ -633,6 +635,14 @@ extern void if_up_via_zapi(struct interface *ifp); extern void if_down_via_zapi(struct interface *ifp); extern void if_destroy_via_zapi(struct interface *ifp); +extern void if_update_state(struct interface *ifp); +extern void if_update_state_metric(struct interface *ifp, uint32_t metric); +extern void if_update_state_mtu(struct interface *ifp, uint mtu); +extern void if_update_state_mtu6(struct interface *ifp, uint mtu); +extern void if_update_state_hw_addr(struct interface *ifp, const uint8_t *hw_addr, uint len); +extern void if_update_state_speed(struct interface *ifp, uint32_t speed); + +extern bool if_notify_oper_changes; extern const struct frr_yang_module_info frr_interface_info; extern const struct frr_yang_module_info frr_interface_cli_info; diff --git a/lib/libfrr.c b/lib/libfrr.c index d1a9f0b1cb..261d3aa87e 100644 --- a/lib/libfrr.c +++ b/lib/libfrr.c @@ -108,6 +108,9 @@ static const struct option lo_always[] = { { "module", no_argument, NULL, 'M' }, { "profile", required_argument, NULL, 'F' }, { "pathspace", required_argument, NULL, 'N' }, +#ifdef HAVE_NETLINK + { "vrfwnetns", no_argument, NULL, 'w' }, +#endif { "vrfdefaultname", required_argument, NULL, 'o' }, { "graceful_restart", optional_argument, NULL, 'K' }, { "vty_socket", required_argument, NULL, OPTION_VTYSOCK }, @@ -120,6 +123,9 @@ static const struct option lo_always[] = { { NULL } }; static const struct optspec os_always = { +#ifdef HAVE_NETLINK + "w" +#endif "hvdM:F:N:o:K::", " -h, --help Display this help and exit\n" " -v, --version Print program version\n" @@ -127,6 +133,9 @@ static const struct optspec os_always = { " -M, --module Load specified module\n" " -F, --profile Use specified configuration profile\n" " -N, --pathspace Insert prefix into config & socket paths\n" +#ifdef HAVE_NETLINK + " -w, --vrfwnetns Use network namespaces for VRFs\n" +#endif " -o, --vrfdefaultname Set default 
VRF name.\n" " -K, --graceful_restart FRR starting in Graceful Restart mode, with optional route-cleanup timer\n" " --vty_socket Override vty socket path\n" @@ -516,6 +525,11 @@ static int frr_opt(int opt) snprintf(frr_zclientpath, sizeof(frr_zclientpath), ZAPI_SOCK_NAME); break; +#ifdef HAVE_NETLINK + case 'w': + vrf_configure_backend(VRF_BACKEND_NETNS); + break; +#endif case 'o': vrf_set_default_name(optarg); break; diff --git a/lib/mgmt_be_client.c b/lib/mgmt_be_client.c index f03006ad0e..806242ed53 100644 --- a/lib/mgmt_be_client.c +++ b/lib/mgmt_be_client.c @@ -99,12 +99,12 @@ struct mgmt_be_client { struct nb_config *candidate_config; struct nb_config *running_config; - unsigned long num_edit_nb_cfg; - unsigned long avg_edit_nb_cfg_tm; - unsigned long num_prep_nb_cfg; - unsigned long avg_prep_nb_cfg_tm; - unsigned long num_apply_nb_cfg; - unsigned long avg_apply_nb_cfg_tm; + uint64_t num_edit_nb_cfg; + uint64_t avg_edit_nb_cfg_tm; + uint64_t num_prep_nb_cfg; + uint64_t avg_prep_nb_cfg_tm; + uint64_t num_apply_nb_cfg; + uint64_t avg_apply_nb_cfg_tm; struct mgmt_be_txns_head txn_head; @@ -117,7 +117,7 @@ struct mgmt_be_client { struct debug mgmt_dbg_be_client = { .conf = "debug mgmt client backend", - .desc = "Management backend client operations" + .desc = "Management backend client operations", }; /* NOTE: only one client per proc for now. */ @@ -312,49 +312,102 @@ static int be_client_send_error(struct mgmt_be_client *client, uint64_t txn_id, return ret; } -static int mgmt_be_send_notification(void *__be_client, const char *xpath, - const struct lyd_node *tree) +static int __send_notification(struct mgmt_be_client *client, const char *xpath, + const struct lyd_node *tree, uint8_t op) { - struct mgmt_be_client *client = __be_client; struct mgmt_msg_notify_data *msg = NULL; + // LYD_FORMAT format = LYD_LYB; LYD_FORMAT format = LYD_JSON; uint8_t **darrp; LY_ERR err; int ret = 0; - assert(tree); - - debug_be_client("%s: sending YANG notification: %s", __func__, - tree->schema->name); + assert(op != NOTIFY_OP_NOTIFICATION || xpath || tree); + debug_be_client("%s: sending %sYANG %snotification: %s", __func__, + op == NOTIFY_OP_DS_DELETE ? "delete " + : op == NOTIFY_OP_DS_REPLACE ? "replace " + : op == NOTIFY_OP_DS_PATCH ? "patch " + : "", + op == NOTIFY_OP_NOTIFICATION ? 
"" : "DS ", xpath ?: tree->schema->name); /* * Allocate a message and append the data to it using `format` */ - msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_notify_data, 0, - MTYPE_MSG_NATIVE_NOTIFY); + msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_notify_data, 0, MTYPE_MSG_NATIVE_NOTIFY); msg->code = MGMT_MSG_CODE_NOTIFY; msg->result_type = format; + msg->op = op; mgmt_msg_native_xpath_encode(msg, xpath); - darrp = mgmt_msg_native_get_darrp(msg); - err = yang_print_tree_append(darrp, tree, format, - (LYD_PRINT_SHRINK | LYD_PRINT_WD_EXPLICIT | - LYD_PRINT_WITHSIBLINGS)); - if (err) { - flog_err(EC_LIB_LIBYANG, - "%s: error creating notification data: %s", __func__, - ly_strerrcode(err)); - ret = 1; - goto done; + if (tree) { + darrp = mgmt_msg_native_get_darrp(msg); + err = yang_print_tree_append(darrp, tree, format, + (LYD_PRINT_SHRINK | LYD_PRINT_WD_EXPLICIT | + LYD_PRINT_WITHSIBLINGS)); + if (err) { + flog_err(EC_LIB_LIBYANG, "%s: error creating notification data: %s", + __func__, ly_strerrcode(err)); + ret = 1; + goto done; + } } - (void)be_client_send_native_msg(client, msg, - mgmt_msg_native_get_msg_len(msg), false); + ret = be_client_send_native_msg(client, msg, mgmt_msg_native_get_msg_len(msg), false); done: mgmt_msg_native_free_msg(msg); return ret; } +/** + * mgmt_be_send_ds_delete_notification() - Send DS notification to mgmtd + */ +int mgmt_be_send_ds_delete_notification(const char *path) +{ + if (!__be_client) { + debug_be_client("%s: No mgmtd connection for DS delete notification: %s", __func__, + path); + return 1; + } + return __send_notification(__be_client, path, NULL, NOTIFY_OP_DS_DELETE); +} + +/** + * mgmt_be_send_ds_patch_notification() - Send a YANG patch DS notification to mgmtd + */ +int mgmt_be_send_ds_patch_notification(const char *path, const struct lyd_node *patch) +{ + if (!__be_client) { + debug_be_client("%s: No mgmtd connection for DS delete notification: %s", __func__, + path); + return 1; + } + return __send_notification(__be_client, path, patch, NOTIFY_OP_DS_PATCH); +} + +/** + * mgmt_be_send_ds_replace_notification() - Send a replace DS notification to mgmtd + */ +int mgmt_be_send_ds_replace_notification(const char *path, const struct lyd_node *tree) +{ + if (!__be_client) { + debug_be_client("%s: No mgmtd connection for DS delete notification: %s", __func__, + path); + return 1; + } + return __send_notification(__be_client, path, tree, NOTIFY_OP_DS_REPLACE); +} + +/** + * mgmt_be_send_notification() - Send notification to mgmtd + * + * This function is attached to the northbound notification hook. + */ +static int mgmt_be_send_notification(void *__client, const char *path, const struct lyd_node *tree) +{ + __send_notification(__client, path, tree, NOTIFY_OP_NOTIFICATION); + return 0; +} + static int mgmt_be_send_txn_reply(struct mgmt_be_client *client_ctx, uint64_t txn_id, bool create) { @@ -568,7 +621,7 @@ static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn) mgmt_be_send_cfgdata_create_reply(client_ctx, txn->txn_id, error ? false : true, error ? 
err_buf : NULL); - debug_be_client("Avg-nb-edit-duration %lu uSec, nb-prep-duration %lu (avg: %lu) uSec, batch size %u", + debug_be_client("Avg-nb-edit-duration %Lu uSec, nb-prep-duration %lu (avg: %Lu) uSec, batch size %u", client_ctx->avg_edit_nb_cfg_tm, prep_nb_cfg_tm, client_ctx->avg_prep_nb_cfg_tm, (uint32_t)num_processed); @@ -717,10 +770,9 @@ static int mgmt_be_txn_proc_cfgapply(struct mgmt_be_txn_ctx *txn) gettimeofday(&apply_nb_cfg_end, NULL); apply_nb_cfg_tm = timeval_elapsed(apply_nb_cfg_end, apply_nb_cfg_start); - client_ctx->avg_apply_nb_cfg_tm = ((client_ctx->avg_apply_nb_cfg_tm * - client_ctx->num_apply_nb_cfg) + - apply_nb_cfg_tm) / - (client_ctx->num_apply_nb_cfg + 1); + client_ctx->avg_apply_nb_cfg_tm = + ((client_ctx->avg_apply_nb_cfg_tm * client_ctx->num_apply_nb_cfg) + apply_nb_cfg_tm) / + (client_ctx->num_apply_nb_cfg + 1); client_ctx->num_apply_nb_cfg++; txn->nb_txn = NULL; @@ -736,8 +788,8 @@ static int mgmt_be_txn_proc_cfgapply(struct mgmt_be_txn_ctx *txn) mgmt_be_send_apply_reply(client_ctx, txn->txn_id, true, NULL); - debug_be_client("Nb-apply-duration %lu (avg: %lu) uSec", - apply_nb_cfg_tm, client_ctx->avg_apply_nb_cfg_tm); + debug_be_client("Nb-apply-duration %lu (avg: %Lu) uSec", apply_nb_cfg_tm, + client_ctx->avg_apply_nb_cfg_tm); return 0; } @@ -854,8 +906,15 @@ static enum nb_error be_client_send_tree_data_batch(const struct lyd_node *tree, more = true; ret = NB_OK; } - if (ret != NB_OK) + if (ret != NB_OK) { + if (be_client_send_error(client, args->txn_id, args->req_id, false, -EINVAL, + "BE client %s txn-id %Lu error fetching oper state %d", + client->name, args->txn_id, ret)) + ret = NB_ERR; + else + ret = NB_OK; goto done; + } tree_msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_tree_data, 0, MTYPE_MSG_NATIVE_TREE_DATA); @@ -870,20 +929,15 @@ static enum nb_error be_client_send_tree_data_batch(const struct lyd_node *tree, (LYD_PRINT_SHRINK | LYD_PRINT_WD_EXPLICIT | LYD_PRINT_WITHSIBLINGS)); if (err) { - ret = NB_ERR; - goto done; + mgmt_msg_native_free_msg(tree_msg); + /* We will be called again to send the error */ + return NB_ERR; } (void)be_client_send_native_msg(client, tree_msg, mgmt_msg_native_get_msg_len(tree_msg), false); -done: mgmt_msg_native_free_msg(tree_msg); - if (ret) - be_client_send_error(client, args->txn_id, args->req_id, false, - -EINVAL, - "BE client %s txn-id %" PRIu64 - " error fetching oper state %d", - client->name, args->txn_id, ret); +done: if (ret != NB_OK || !more) XFREE(MTYPE_MGMTD_BE_GT_CB_ARGS, args); return ret; @@ -1060,19 +1114,24 @@ static void be_client_handle_notify(struct mgmt_be_client *client, void *msgbuf, size_t msg_len) { struct mgmt_msg_notify_data *notif_msg = msgbuf; - struct nb_node *nb_node; - struct lyd_node *dnode; + struct nb_node *nb_node, *nb_parent; + struct lyd_node *dnode = NULL; const char *data = NULL; const char *notif; - LY_ERR err; + bool is_yang_notify; + LY_ERR err = LY_SUCCESS; debug_be_client("Received notification for client %s", client->name); notif = mgmt_msg_native_xpath_data_decode(notif_msg, msg_len, data); - if (!notif || !data) { + if (!notif) { log_err_be_client("Corrupt notify msg"); return; } + if (!data && (notif_msg->op == NOTIFY_OP_DS_REPLACE || notif_msg->op == NOTIFY_OP_DS_PATCH)) { + log_err_be_client("Corrupt replace/patch notify msg: missing data"); + return; + } nb_node = nb_node_find(notif); if (!nb_node) { @@ -1080,25 +1139,62 @@ static void be_client_handle_notify(struct mgmt_be_client *client, void *msgbuf, return; } - if (!nb_node->cbs.notify) { + is_yang_notify 
= !!CHECK_FLAG(nb_node->snode->nodetype, LYS_NOTIF); + + if (is_yang_notify && !nb_node->cbs.notify) { debug_be_client("No notification callback for: %s", notif); return; } - err = yang_parse_notification(notif, notif_msg->result_type, data, + if (!nb_node->cbs.notify) { + /* + * See if a parent has a callback, this is so backend's can + * listen for changes on an entire datastore sub-tree. + */ + for (nb_parent = nb_node->parent; nb_parent; nb_parent = nb_node->parent) + if (nb_parent->cbs.notify) + break; + if (!nb_parent) { + debug_be_client("Including parents, no DS notification callback for: %s", + notif); + return; + } + nb_node = nb_parent; + } + + if (data && is_yang_notify) { + err = yang_parse_notification(notif, notif_msg->result_type, data, &dnode); + } else if (data) { + err = yang_parse_data(notif, notif_msg->result_type, false, true, false, data, &dnode); + } if (err) { - log_err_be_client("Can't parse notification data for: %s", - notif); + log_err_be_client("Can't parse notification data for: %s", notif); return; } - nb_callback_notify(nb_node, notif, dnode); + nb_callback_notify(nb_node, notif_msg->op, notif, dnode); lyd_free_all(dnode); } /* + * Process a notify select msg + */ +static void be_client_handle_notify_select(struct mgmt_be_client *client, void *msgbuf, + size_t msg_len) +{ + struct mgmt_msg_notify_select *msg = msgbuf; + const char **selectors = NULL; + + debug_be_client("Received notify-select for client %s", client->name); + + if (msg_len >= sizeof(*msg)) + selectors = mgmt_msg_native_strings_decode(msg, msg_len, msg->selectors); + nb_notif_set_filters(selectors, msg->replace); +} + +/* * Handle a native encoded message * * We don't create transactions with native messaging. @@ -1119,6 +1215,9 @@ static void be_client_handle_native_msg(struct mgmt_be_client *client, case MGMT_MSG_CODE_NOTIFY: be_client_handle_notify(client, msg, msg_len); break; + case MGMT_MSG_CODE_NOTIFY_SELECT: + be_client_handle_notify_select(client, msg, msg_len); + break; default: log_err_be_client("unknown native message txn-id %" PRIu64 " req-id %" PRIu64 " code %u to client %s", @@ -1259,6 +1358,190 @@ DEFPY(debug_mgmt_client_be, debug_mgmt_client_be_cmd, return CMD_SUCCESS; } +/* + * XPath: /frr-backend:clients/client + * + * We only implement a list of one entry (for the this backend client) the + * results will be merged inside mgmtd. 
+ */ +static const void *clients_client_get_next(struct nb_cb_get_next_args *args) +{ + if (args->list_entry == NULL) + return __be_client; + return NULL; +} + +static int clients_client_get_keys(struct nb_cb_get_keys_args *args) +{ + args->keys->num = 1; + strlcpy(args->keys->key[0], __be_client->name, sizeof(args->keys->key[0])); + + return NB_OK; +} + +static const void *clients_client_lookup_entry(struct nb_cb_lookup_entry_args *args) +{ + const char *name = args->keys->key[0]; + + if (!strcmp(name, __be_client->name)) + return __be_client; + + return NULL; +} + +/* + * XPath: /frr-backend:clients/client/name + */ +static enum nb_error clients_client_name_get(const struct nb_node *nb_node, + const void *parent_list_entry, struct lyd_node *parent) +{ + const struct lysc_node *snode = nb_node->snode; + LY_ERR err; + + err = lyd_new_term(parent, snode->module, snode->name, __be_client->name, false, NULL); + if (err != LY_SUCCESS) + return NB_ERR_RESOURCE; + + return NB_OK; +} + +/* + * XPath: /frr-backend:clients/client/state/candidate-config-version + */ +static enum nb_error clients_client_state_candidate_config_version_get( + const struct nb_node *nb_node, const void *parent_list_entry, struct lyd_node *parent) +{ + const struct lysc_node *snode = nb_node->snode; + uint64_t value = __be_client->candidate_config->version; + + if (lyd_new_term_bin(parent, snode->module, snode->name, &value, sizeof(value), + LYD_NEW_PATH_UPDATE, NULL)) + return NB_ERR_RESOURCE; + + return NB_OK; +} + +/* + * XPath: /frr-backend:clients/client/state/running-config-version + */ +static enum nb_error clients_client_state_running_config_version_get(const struct nb_node *nb_node, + const void *parent_list_entry, + struct lyd_node *parent) +{ + const struct lysc_node *snode = nb_node->snode; + uint64_t value = __be_client->running_config->version; + + if (lyd_new_term_bin(parent, snode->module, snode->name, &value, sizeof(value), + LYD_NEW_PATH_UPDATE, NULL)) + return NB_ERR_RESOURCE; + + return NB_OK; +} + +/* + * XPath: /frr-backend:clients/client/state/notify-selectors + * + * Is this better in northbound_notif.c? Let's decide when we add more to this module. 
+ */ + +static enum nb_error clients_client_state_notify_selectors_get(const struct nb_node *nb_node, + const void *parent_list_entry, + struct lyd_node *parent) +{ + const struct lysc_node *snode = nb_node->snode; + const char **p; + LY_ERR err; + + darr_foreach_p (nb_notif_filters, p) { + err = lyd_new_term(parent, snode->module, snode->name, *p, false, NULL); + if (err != LY_SUCCESS) + return NB_ERR_RESOURCE; + } + + return NB_OK; +} + +/* clang-format off */ +const struct frr_yang_module_info frr_backend_info = { + .name = "frr-backend", + .nodes = { + { + .xpath = "/frr-backend:clients/client", + .cbs = { + .get_next = clients_client_get_next, + .get_keys = clients_client_get_keys, + .lookup_entry = clients_client_lookup_entry, + } + }, + { + .xpath = "/frr-backend:clients/client/name", + .cbs.get = clients_client_name_get, + }, + { + .xpath = "/frr-backend:clients/client/state/candidate-config-version", + .cbs = { + .get = clients_client_state_candidate_config_version_get, + } + }, + { + .xpath = "/frr-backend:clients/client/state/running-config-version", + .cbs = { + .get = clients_client_state_running_config_version_get, + } + }, + { + .xpath = "/frr-backend:clients/client/state/edit-count", + .cbs = { + .get = nb_oper_uint64_get, + .get_elem = (void *)(intptr_t)offsetof(struct mgmt_be_client, num_edit_nb_cfg), + } + }, + { + .xpath = "/frr-backend:clients/client/state/avg-edit-time", + .cbs = { + .get = nb_oper_uint64_get, + .get_elem = (void *)(intptr_t)offsetof(struct mgmt_be_client, avg_edit_nb_cfg_tm), + } + }, + { + .xpath = "/frr-backend:clients/client/state/prep-count", + .cbs = { + .get = nb_oper_uint64_get, + .get_elem = (void *)(intptr_t)offsetof(struct mgmt_be_client, num_prep_nb_cfg), + } + }, + { + .xpath = "/frr-backend:clients/client/state/avg-prep-time", + .cbs = { + .get = nb_oper_uint64_get, + .get_elem = (void *)(intptr_t)offsetof(struct mgmt_be_client, avg_prep_nb_cfg_tm), + } + }, + { + .xpath = "/frr-backend:clients/client/state/apply-count", + .cbs = { + .get = nb_oper_uint64_get, + .get_elem = (void *)(intptr_t)offsetof(struct mgmt_be_client, num_apply_nb_cfg), + } + }, + { + .xpath = "/frr-backend:clients/client/state/avg-apply-time", + .cbs = { + .get = nb_oper_uint64_get, + .get_elem = (void *)(intptr_t)offsetof(struct mgmt_be_client, avg_apply_nb_cfg_tm), + } + }, + { + .xpath = "/frr-backend:clients/client/state/notify-selectors", + .cbs.get = clients_client_state_notify_selectors_get, + }, + { + .xpath = NULL, + }, + } +}; +/* clang-format on */ + struct mgmt_be_client *mgmt_be_client_create(const char *client_name, struct mgmt_be_client_cbs *cbs, uintptr_t user_data, diff --git a/lib/mgmt_be_client.h b/lib/mgmt_be_client.h index 6ed8c2a39f..5e78f0f433 100644 --- a/lib/mgmt_be_client.h +++ b/lib/mgmt_be_client.h @@ -85,6 +85,8 @@ struct mgmt_be_client_cbs { extern struct debug mgmt_dbg_be_client; +extern const struct frr_yang_module_info frr_backend_info; + /*************************************************************** * API prototypes ***************************************************************/ @@ -112,6 +114,22 @@ extern struct mgmt_be_client * mgmt_be_client_create(const char *name, struct mgmt_be_client_cbs *cbs, uintptr_t user_data, struct event_loop *event_loop); + +/** + * mgmt_be_send_ds_delete_notification() - Send a datastore delete notification. + */ +extern int mgmt_be_send_ds_delete_notification(const char *path); + +/** + * mgmt_be_send_ds_patch_notification() - Send a datastore YANG patch notification. 
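+ *
+ * @path: datastore path the patch applies to.
+ * @tree: libyang data tree carrying the changed data nodes.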
+ */ +extern int mgmt_be_send_ds_patch_notification(const char *path, const struct lyd_node *tree); + +/** + * mgmt_be_send_ds_replace_notification() - Send a datastore replace notification. + */ +extern int mgmt_be_send_ds_replace_notification(const char *path, const struct lyd_node *tree); + /* * Initialize library vty (adds debug support). * diff --git a/lib/mgmt_msg_native.c b/lib/mgmt_msg_native.c index b85c7d1b61..46dfe7f2e1 100644 --- a/lib/mgmt_msg_native.c +++ b/lib/mgmt_msg_native.c @@ -14,7 +14,8 @@ DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_ERROR, "native error msg"); DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_GET_TREE, "native get tree msg"); DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_TREE_DATA, "native tree data msg"); DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_GET_DATA, "native get data msg"); -DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_NOTIFY, "native get data msg"); +DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_NOTIFY, "native notify msg"); +DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_NOTIFY_SELECT, "native notify select msg"); DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_EDIT, "native edit msg"); DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_EDIT_REPLY, "native edit reply msg"); DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_RPC, "native RPC msg"); diff --git a/lib/mgmt_msg_native.h b/lib/mgmt_msg_native.h index 587a002801..73303846e7 100644 --- a/lib/mgmt_msg_native.h +++ b/lib/mgmt_msg_native.h @@ -159,6 +159,7 @@ DECLARE_MTYPE(MSG_NATIVE_GET_TREE); DECLARE_MTYPE(MSG_NATIVE_TREE_DATA); DECLARE_MTYPE(MSG_NATIVE_GET_DATA); DECLARE_MTYPE(MSG_NATIVE_NOTIFY); +DECLARE_MTYPE(MSG_NATIVE_NOTIFY_SELECT); DECLARE_MTYPE(MSG_NATIVE_EDIT); DECLARE_MTYPE(MSG_NATIVE_EDIT_REPLY); DECLARE_MTYPE(MSG_NATIVE_RPC); @@ -323,22 +324,29 @@ _Static_assert(sizeof(struct mgmt_msg_get_data) == offsetof(struct mgmt_msg_get_data, xpath), "Size mismatch"); + +#define NOTIFY_OP_NOTIFICATION 0 +#define NOTIFY_OP_DS_REPLACE 1 +#define NOTIFY_OP_DS_DELETE 2 +#define NOTIFY_OP_DS_PATCH 3 + /** * struct mgmt_msg_notify_data - Message carrying notification data. * * @result_type: ``LYD_FORMAT`` for format of the @result value. * @data: The xpath string of the notification followed by the tree data in * @result_type format. + * @op: notify operation type. 
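+ *      One of NOTIFY_OP_NOTIFICATION, NOTIFY_OP_DS_REPLACE,
+ *      NOTIFY_OP_DS_DELETE or NOTIFY_OP_DS_PATCH as defined above.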
*/ struct mgmt_msg_notify_data { struct mgmt_msg_header; uint8_t result_type; - uint8_t resv2[7]; + uint8_t op; + uint8_t resv2[6]; alignas(8) char data[]; }; -_Static_assert(sizeof(struct mgmt_msg_notify_data) == - offsetof(struct mgmt_msg_notify_data, data), +_Static_assert(sizeof(struct mgmt_msg_notify_data) == offsetof(struct mgmt_msg_notify_data, data), "Size mismatch"); #define EDIT_FLAG_IMPLICIT_LOCK 0x01 diff --git a/lib/northbound.c b/lib/northbound.c index a385cc9ece..60794b8728 100644 --- a/lib/northbound.c +++ b/lib/northbound.c @@ -273,9 +273,11 @@ static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node) error += nb_node_validate_cb(nb_node, NB_CB_APPLY_FINISH, !!nb_node->cbs.apply_finish, true); error += nb_node_validate_cb(nb_node, NB_CB_GET_ELEM, - !!nb_node->cbs.get_elem, false); + (nb_node->cbs.get_elem || nb_node->cbs.get), false); error += nb_node_validate_cb(nb_node, NB_CB_GET_NEXT, - !!nb_node->cbs.get_next, false); + (nb_node->cbs.get_next || + (nb_node->snode->nodetype == LYS_LEAFLIST && nb_node->cbs.get)), + false); error += nb_node_validate_cb(nb_node, NB_CB_GET_KEYS, !!nb_node->cbs.get_keys, false); error += nb_node_validate_cb(nb_node, NB_CB_LOOKUP_ENTRY, @@ -683,19 +685,30 @@ void nb_config_diff(const struct nb_config *config1, lyd_free_all(diff); } -static int dnode_create(struct nb_config *candidate, const char *xpath, - const char *value, uint32_t options, - struct lyd_node **new_dnode) +/** + * dnode_create() - create a new node in the tree + * @candidate: config tree to create node in. + * @xpath: target node to create. + * @value: value for the new if required. + * @options: lyd_new_path options + * @new_dnode: the newly created node. If options includes LYD_NEW_PATH_UPDATE, + * and the node exists (i.e., isn't create but updated), then + * new_node will be set to NULL not the existing node). + * + * Return: NB_OK or NB_ERR. 
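+ *         (with the switch to lyd_new_path2() the function actually returns
+ *         an LY_ERR value: LY_SUCCESS on success or a libyang error code on
+ *         failure).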
+ */ +static LY_ERR dnode_create(struct nb_config *candidate, const char *xpath, const char *value, + uint32_t options, struct lyd_node **new_dnode) { struct lyd_node *dnode; LY_ERR err; - err = lyd_new_path(candidate->dnode, ly_native_ctx, xpath, value, - options, &dnode); + err = lyd_new_path2(candidate->dnode, ly_native_ctx, xpath, value, 0, 0, options, NULL, + &dnode); if (err) { flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path(%s) failed: %d", __func__, xpath, err); - return NB_ERR; + return err; } else if (dnode) { err = lyd_new_implicit_tree(dnode, LYD_IMPLICIT_NO_STATE, NULL); if (err) { @@ -706,7 +719,7 @@ static int dnode_create(struct nb_config *candidate, const char *xpath, } if (new_dnode) *new_dnode = dnode; - return NB_OK; + return LY_SUCCESS; } int nb_candidate_edit(struct nb_config *candidate, const struct nb_node *nb_node, @@ -1855,7 +1868,7 @@ int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath, return nb_node->cbs.rpc(&args); } -void nb_callback_notify(const struct nb_node *nb_node, const char *xpath, +void nb_callback_notify(const struct nb_node *nb_node, uint8_t op, const char *xpath, struct lyd_node *dnode) { struct nb_cb_notify_args args = {}; @@ -1863,6 +1876,7 @@ void nb_callback_notify(const struct nb_node *nb_node, const char *xpath, DEBUGD(&nb_dbg_cbs_notify, "northbound notify: %s", xpath); args.xpath = xpath; + args.op = op; args.dnode = dnode; nb_node->cbs.notify(&args); } @@ -2752,10 +2766,15 @@ void nb_init(struct event_loop *tm, /* Initialize oper-state */ nb_oper_init(tm); + + /* Initialize notification-state */ + nb_notif_init(tm); } void nb_terminate(void) { + nb_notif_terminate(); + nb_oper_terminate(); /* Terminate the northbound CLI. */ diff --git a/lib/northbound.h b/lib/northbound.h index 97a1d31e57..c31f007e70 100644 --- a/lib/northbound.h +++ b/lib/northbound.h @@ -21,6 +21,7 @@ extern "C" { /* Forward declaration(s). */ struct vty; struct debug; +struct nb_node; struct nb_yang_xpath_tag { uint32_t ns; @@ -102,6 +103,20 @@ enum nb_cb_operation { NB_CB_NOTIFY, }; +/* Northbound error codes. */ +enum nb_error { + NB_OK = 0, + NB_ERR, + NB_ERR_NO_CHANGES, + NB_ERR_NOT_FOUND, + NB_ERR_EXISTS, + NB_ERR_LOCKED, + NB_ERR_VALIDATION, + NB_ERR_RESOURCE, + NB_ERR_INCONSISTENCY, + NB_YIELD, +}; + union nb_resource { int fd; void *ptr; @@ -290,6 +305,7 @@ struct nb_cb_rpc_args { struct nb_cb_notify_args { /* XPath of the notification. */ const char *xpath; + uint8_t op; /* * libyang data node representing the notification. If the notification @@ -426,6 +442,25 @@ struct nb_callbacks { void (*apply_finish)(struct nb_cb_apply_finish_args *args); /* + * Operational data callback (new direct tree add method). + * + * The callback function should create a new lyd_node (leaf) or + * lyd_node's (leaf list) for the value and attach to parent. + * + * nb_node + * The node representing the leaf or leaf list + * list_entry + * List entry from get_next (or NULL). + * parent + * The parent lyd_node to attach the leaf data to. + * + * Returns: + * Returns an nb_error if the data could not be added to the tree. + */ + enum nb_error (*get)(const struct nb_node *nb_node, const void *list_entry, + struct lyd_node *parent); + + /* * Operational data callback. * * The callback function should return the value of a specific leaf, @@ -672,20 +707,6 @@ struct frr_yang_module_info { #endif }; -/* Northbound error codes. 
*/ -enum nb_error { - NB_OK = 0, - NB_ERR, - NB_ERR_NO_CHANGES, - NB_ERR_NOT_FOUND, - NB_ERR_EXISTS, - NB_ERR_LOCKED, - NB_ERR_VALIDATION, - NB_ERR_RESOURCE, - NB_ERR_INCONSISTENCY, - NB_YIELD, -}; - /* Default priority. */ #define NB_DFLT_PRIORITY (UINT32_MAX / 2) @@ -777,16 +798,19 @@ typedef int (*nb_oper_data_cb)(const struct lysc_node *snode, * error. * * If nb_op_iterate_yielding() was passed with @should_batch set then this - * callback will be invoked during each portion (batch) of the walk. + * callback will be invoked during each portion (batch) of the walk with @ret + * set to NB_YIELD. * * The @tree is read-only and should not be modified or freed. * - * If this function returns anything but NB_OK then the walk will be terminated. - * and this function will not be called again regardless of if @ret was - * `NB_YIELD` or not. + * When @ret is NB_YIELD and this function returns anything but NB_OK then the + * walk will be terminated, and this function *will* be called again with @ret + * set the non-NB_OK return value it just returned. This allows the callback + * have a single bit of code to send an error message and do any cleanup for any + * type of failure, whether that failure was from itself or from the infra code. * - * Return: NB_OK to continue or complete the walk normally, otherwise an error - * to immediately terminate the walk. + * Return: NB_OK or an error during handling of @ret == NB_YIELD otherwise the + * value is ignored. */ /* Callback function used by nb_oper_data_iter_yielding(). */ typedef enum nb_error (*nb_oper_data_finish_cb)(const struct lyd_node *tree, @@ -813,9 +837,13 @@ extern struct debug nb_dbg_libyang; /* Global running configuration. */ extern struct nb_config *running_config; +/* Global notification filters */ +extern const char **nb_notif_filters; + /* Wrappers for the northbound callbacks. */ -extern struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node, - const char *xpath, +extern struct yang_data *nb_callback_has_new_get_elem(const struct nb_node *nb_node); + +extern struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node, const char *xpath, const void *list_entry); extern const void *nb_callback_get_next(const struct nb_node *nb_node, const void *parent_list_entry, @@ -834,7 +862,7 @@ extern const void *nb_callback_lookup_next(const struct nb_node *nb_node, extern int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath, const struct lyd_node *input, struct lyd_node *output, char *errmsg, size_t errmsg_len); -extern void nb_callback_notify(const struct nb_node *nb_node, const char *xpath, +extern void nb_callback_notify(const struct nb_node *nb_node, uint8_t op, const char *xpath, struct lyd_node *dnode); /* @@ -1488,6 +1516,22 @@ extern void nb_oper_cancel_walk(void *walk); */ extern void nb_oper_cancel_all_walks(void); +/** + * nb_oper_walk_finish_arg() - return the finish arg for this walk + */ +extern void *nb_oper_walk_finish_arg(void *walk); +/** + * nb_oper_walk_cb_arg() - return the callback arg for this walk + */ +extern void *nb_oper_walk_cb_arg(void *walk); + +/* Generic getter functions */ +extern enum nb_error nb_oper_uint32_get(const struct nb_node *nb_node, + const void *parent_list_entry, struct lyd_node *parent); + +extern enum nb_error nb_oper_uint64_get(const struct nb_node *nb_node, + const void *parent_list_entry, struct lyd_node *parent); + /* * Validate if the northbound callback operation is valid for the given node. 
* @@ -1720,6 +1764,80 @@ extern void nb_oper_init(struct event_loop *loop); extern void nb_oper_terminate(void); extern bool nb_oper_is_yang_lib_query(const char *xpath); + +/** + * nb_op_update() - Create new state data. + * @tree: subtree @path is relative to or NULL in which case @path must be + * absolute. + * @path: The path of the state node to create. + * @value: The canonical value of the state. + * + * Return: The new libyang node. + */ +extern struct lyd_node *nb_op_update(struct lyd_node *tree, const char *path, const char *value); + +/** + * nb_op_update_delete() - Delete state data. + * @tree: subtree @path is relative to or NULL in which case @path must be + * absolute. + * @path: The path of the state node to delete, or NULL if @tree should just be + * deleted. + */ +extern void nb_op_update_delete(struct lyd_node *tree, const char *path); + +/** + * nb_op_update_pathf() - Create new state data. + * @tree: subtree @path_fmt is relative to or NULL in which case @path_fmt must + * be absolute. + * @path_fmt: The path format string of the state node to create. + * @value: The canonical value of the state. + * @...: The values to substitute into @path_fmt. + * + * Return: The new libyang node. + */ +extern struct lyd_node *nb_op_update_pathf(struct lyd_node *tree, const char *path_fmt, + const char *value, ...) PRINTFRR(2, 4); +extern struct lyd_node *nb_op_update_vpathf(struct lyd_node *tree, const char *path_fmt, + const char *value, va_list ap); +/** + * nb_op_update_delete_pathf() - Delete state data. + * @tree: subtree @path_fmt is relative to or NULL in which case @path_fmt must + * be absolute. + * @path: The path of the state node to delete. + * @...: The values to substitute into @path_fmt. + */ +extern void nb_op_update_delete_pathf(struct lyd_node *tree, const char *path_fmt, ...) + PRINTFRR(2, 3); +extern void nb_op_update_delete_vpathf(struct lyd_node *tree, const char *path_fmt, va_list ap); + +/** + * nb_op_updatef() - Create new state data. + * @tree: subtree @path is relative to or NULL in which case @path must be + * absolute. + * @path: The path of the state node to create. + * @val_fmt: The value format string to set the canonical value of the state. + * @...: The values to substitute into @val_fmt. + * + * Return: The new libyang node. + */ +extern struct lyd_node *nb_op_updatef(struct lyd_node *tree, const char *path, const char *val_fmt, + ...) PRINTFRR(3, 4); + +extern struct lyd_node *nb_op_vupdatef(struct lyd_node *tree, const char *path, const char *val_fmt, + va_list ap); + +/** + * nb_notif_set_filters() - add or replace notification filters + * @selectors: darr array of selector (filter) xpath strings, can be NULL if + * @replace is true. nb_notif_set_filters takes ownership of this + * array and the contained darr strings. + * @replace: true to replace existing set otherwise append. + */ +extern void nb_notif_set_filters(const char **selectors, bool replace); + +extern void nb_notif_init(struct event_loop *loop); +extern void nb_notif_terminate(void); + #ifdef __cplusplus } #endif diff --git a/lib/northbound_notif.c b/lib/northbound_notif.c new file mode 100644 index 0000000000..9caca9f6d7 --- /dev/null +++ b/lib/northbound_notif.c @@ -0,0 +1,707 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * December 1 2024, Christian Hopps <chopps@labn.net> + * + * Copyright (c) 2024, LabN Consulting, L.L.C. 
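+ *
+ * Track operational-state (datastore) changes reported by the daemon and
+ * generate the corresponding replace/delete notifications toward mgmtd.
+ *
+ * Owners of state report changes with calls such as (illustrative; see the
+ * lib/if.c helpers):
+ *
+ *	nb_op_updatef(ifp->state, "metric", "%u", ifp->metric);
+ *	nb_op_update_delete_pathf(NULL,
+ *		"/frr-interface:lib/interface[name=\"%s\"]/state", ifp->name);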
+ * + */ +#include <zebra.h> +#include "debug.h" +#include "lib_errors.h" +#include "typesafe.h" +#include "northbound.h" +#include "mgmt_be_client.h" + +#define __dbg(fmt, ...) DEBUGD(&nb_dbg_notif, "NB_OP_CHANGE: %s: " fmt, __func__, ##__VA_ARGS__) +#define __log_err(fmt, ...) zlog_err("NB_OP_CHANGE: %s: ERROR: " fmt, __func__, ##__VA_ARGS__) + +#define NB_NOTIF_TIMER_MSEC (10) /* 10msec */ + +/* + * ADDS: + * - Less specific: + * - Any new add will cause more specific pending adds to be dropped and equal + * or more specific deletes to be dropped. + * - More specific: + * - Ignore any new add that is the same or more specific than an existing add. + * - A new add that is more specific than a delete should change the delete + * into an add query (since adds are reported as a replace). + * + * DELETES: + * - Less specific: + * - Any new delete will cause more specific pending deletes to be dropped and + * equal or more specific adds to be dropped. + * - More specific: + * - Ignore new deletes that are the same or more specific than existing + * deletes. + * - A new delete that is more specific than an add can be dropped since we + * use replacement methodology for the add. + * + * One thing we have to pay close attention to is that the state is going to be + * queried when the notification sent, not when we are told of the change. + */ + +DEFINE_MTYPE_STATIC(LIB, OP_CHANGE, "NB Oper Change"); +DEFINE_MTYPE_STATIC(LIB, OP_CHANGES_GROUP, "NB Oper Changes Group"); +DEFINE_MTYPE_STATIC(LIB, NB_NOTIF_WALK_ARGS, "NB Notify Oper Walk"); + +struct op_change { + RB_ENTRY(op_change) link; + char path[]; +}; + +/* + * RB tree for op_change + */ +static int op_change_cmp(const struct op_change *e1, const struct op_change *e2); +RB_HEAD(op_changes, op_change); +RB_PROTOTYPE(op_changes, op_change, link, op_change_cmp) +RB_GENERATE(op_changes, op_change, link, op_change_cmp) + +struct op_changes nb_notif_adds = RB_INITIALIZER(&nb_notif_adds); +struct op_changes nb_notif_dels = RB_INITIALIZER(&nb_notif_dels); +struct event_loop *nb_notif_master; +struct event *nb_notif_timer; +void *nb_notif_walk; + +const char **nb_notif_filters; + +/* + * We maintain a queue of change lists one entry per query and notification send + * action + */ +PREDECL_LIST(op_changes_queue); +struct op_changes_group { + struct op_changes_queue_item item; + struct op_changes adds; + struct op_changes dels; + struct op_changes *cur_changes; /* used when walking */ + struct op_change *cur_change; /* " " " */ +}; + +DECLARE_LIST(op_changes_queue, struct op_changes_group, item); +static struct op_changes_queue_head op_changes_queue; + +struct nb_notif_walk_args { + struct op_changes_group *group; + struct lyd_node *tree; +}; + +static void nb_notif_set_walk_timer(void); + + +static int pathncmp(const char *s1, const char *s2, size_t n) +{ + size_t i = 0; + + while (i < n && *s1 && *s2) { + char c1 = *s1; + char c2 = *s2; + + if ((c1 == '\'' && c2 == '\"') || (c1 == '\"' && c2 == '\'')) { + s1++; + s2++; + i++; + continue; + } + if (c1 != c2) + return (unsigned char)c1 - (unsigned char)c2; + s1++; + s2++; + i++; + } + if (i < n) + return (unsigned char)*s1 - (unsigned char)*s2; + return 0; +} + +static int pathcmp(const char *s1, const char *s2) +{ + while (*s1 && *s2) { + char c1 = *s1; + char c2 = *s2; + + if ((c1 == '\'' && c2 == '\"') || (c1 == '\"' && c2 == '\'')) { + s1++; + s2++; + continue; + } + if (c1 != c2) + return (unsigned char)c1 - (unsigned char)c2; + s1++; + s2++; + } + return (unsigned char)*s1 - (unsigned char)*s2; +} 
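+/*
+ * pathncmp()/pathcmp() compare xpath strings like strncmp()/strcmp(), except
+ * that single and double quotes around predicate values are treated as
+ * equivalent, so that e.g. (illustrative paths):
+ *
+ *	/frr-interface:lib/interface[name='eth0']/state
+ *	/frr-interface:lib/interface[name="eth0"]/state
+ *
+ * compare equal, and a change recorded with one quoting style still matches a
+ * pending change recorded with the other.
+ */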
+ + +static int op_change_cmp(const struct op_change *e1, const struct op_change *e2) +{ + return pathcmp(e1->path, e2->path); +} + +static struct op_change *op_change_alloc(const char *path) +{ + struct op_change *note; + size_t ssize = strlen(path) + 1; + + note = XMALLOC(MTYPE_OP_CHANGE, sizeof(*note) + ssize); + memset(note, 0, sizeof(*note)); + strlcpy(note->path, path, ssize); + + return note; +} + +static void op_change_free(struct op_change *note) +{ + XFREE(MTYPE_OP_CHANGE, note); +} + +/** + * op_changes_group_push() - Save the current set of changes on the queue. + * + * This function will save the current set of changes on the queue and + * initialize a new set of changes. + */ +static void op_changes_group_push(void) +{ + struct op_changes_group *changes; + + if (RB_EMPTY(op_changes, &nb_notif_adds) && RB_EMPTY(op_changes, &nb_notif_dels)) + return; + + __dbg("pushing current oper changes onto queue"); + + changes = XCALLOC(MTYPE_OP_CHANGES_GROUP, sizeof(*changes)); + changes->adds = nb_notif_adds; + changes->dels = nb_notif_dels; + op_changes_queue_add_tail(&op_changes_queue, changes); + + RB_INIT(op_changes, &nb_notif_adds); + RB_INIT(op_changes, &nb_notif_dels); +} + +static void op_changes_group_free(struct op_changes_group *group) +{ + struct op_change *e, *next; + + RB_FOREACH_SAFE (e, op_changes, &group->adds, next) { + RB_REMOVE(op_changes, &group->adds, e); + op_change_free(e); + } + RB_FOREACH_SAFE (e, op_changes, &group->dels, next) { + RB_REMOVE(op_changes, &group->dels, e); + op_change_free(e); + } + XFREE(MTYPE_OP_CHANGES_GROUP, group); +} + +static struct op_change *__find_less_specific(struct op_changes *head, struct op_change *note) +{ + struct op_change *e; + size_t plen; + + /* + * RB_NFIND finds equal or greater (more specific) than the key, + * so the previous node will be a less specific or no match that + * sorts earlier. We want to find when we are a more specific + * match. + */ + e = RB_NFIND(op_changes, head, note); + if (e) + e = RB_PREV(op_changes, e); + else + e = RB_MAX(op_changes, head); + if (!e) + return NULL; + plen = strlen(e->path); + if (pathncmp(e->path, note->path, plen)) + return NULL; + /* equal would have been returned from RB_NFIND() then we went RB_PREV */ + assert(strlen(note->path) != plen); + return e; +} + +static void __drop_eq_or_more_specific(struct op_changes *head, const char *path, int plen, + struct op_change *next) +{ + struct op_change *e; + + for (e = next; e != NULL; e = next) { + /* if the prefix no longer matches we are done */ + if (pathncmp(path, e->path, plen)) + break; + __dbg("dropping more specific %s: %s", head == &nb_notif_adds ? "add" : "delete", + e->path); + next = RB_NEXT(op_changes, e); + RB_REMOVE(op_changes, head, e); + op_change_free(e); + } +} + +static void __op_change_add_del(const char *path, struct op_changes *this_head, + struct op_changes *other_head) +{ + /* find out if this has been subsumed or will subsume */ + + const char *op = this_head == &nb_notif_adds ? "add" : "delete"; + struct op_change *note = op_change_alloc(path); + struct op_change *next, *e; + int plen; + + __dbg("processing oper %s change path: %s", op, path); + + /* + * See if we are already covered by a more general `op`. + */ + e = __find_less_specific(this_head, note); + if (e) { + __dbg("%s path already covered by: %s", op, e->path); + op_change_free(note); + return; + } + + /* + * Handle having a less-specific `other op`. 
+ */ + e = __find_less_specific(other_head, note); + if (e) { + if (this_head == &nb_notif_dels) { + /* + * If we have a less-specific add then drop this + * more-specific delete as the add-replace will remove + * this missing state. + */ + __dbg("delete path already covered add-replace: %s", e->path); + } else { + /* + * If we have a less-specific delete, convert the delete + * to an add, and drop this more-specific add. The new + * less-specific add will pick up the more specific add + * during the walk and as adds are processed as replaces + * any other existing state that was to be deleted will + * still be deleted (unless it also returns) by the replace. + */ + __dbg("add covered, converting covering delete to add-replace: %s", e->path); + RB_REMOVE(op_changes, other_head, e); + __op_change_add_del(e->path, &nb_notif_adds, &nb_notif_dels); + op_change_free(e); + } + op_change_free(note); + return; + } + + e = RB_INSERT(op_changes, this_head, note); + if (e) { + __dbg("path already in %s tree: %s", op, path); + op_change_free(note); + return; + } + + __dbg("scanning for subsumed or subsuming: %s", path); + + plen = strlen(path); + + next = RB_NEXT(op_changes, note); + __drop_eq_or_more_specific(this_head, path, plen, next); + + /* Drop exact match or more specific `other op` */ + next = RB_NFIND(op_changes, other_head, note); + __drop_eq_or_more_specific(other_head, path, plen, next); + + nb_notif_set_walk_timer(); +} + +static void nb_notif_add(const char *path) +{ + __op_change_add_del(path, &nb_notif_adds, &nb_notif_dels); +} + + +static void nb_notif_delete(const char *path) +{ + __op_change_add_del(path, &nb_notif_dels, &nb_notif_adds); +} + +struct lyd_node *nb_op_update(struct lyd_node *tree, const char *path, const char *value) +{ + struct lyd_node *dnode; + const char *abs_path = NULL; + + __dbg("updating path: %s with value: %s", path, value); + + dnode = yang_state_new(tree, path, value); + + if (path[0] == '/') + abs_path = path; + else + abs_path = lyd_path(dnode, LYD_PATH_STD, NULL, 0); + + nb_notif_add(abs_path); + + if (abs_path != path) + free((char *)abs_path); + + return dnode; +} + +void nb_op_update_delete(struct lyd_node *tree, const char *path) +{ + char *abs_path = NULL; + + __dbg("deleting path: %s", path); + + if (path && path[0] == '/') + abs_path = (char *)path; + else { + assert(tree); + abs_path = lyd_path(tree, LYD_PATH_STD, NULL, 0); + assert(abs_path); + if (path) { + char *tmp = darr_strdup(abs_path); + + free(abs_path); + abs_path = tmp; + if (*darr_last(abs_path) != '/') + darr_in_strcat(abs_path, "/"); + assert(abs_path); /* silence bad CLANG NULL warning */ + darr_in_strcat(abs_path, path); + } + } + + yang_state_delete(tree, path); + + nb_notif_delete(abs_path); + + if (abs_path != path) { + if (path) + darr_free(abs_path); + else + free(abs_path); + } +} + +PRINTFRR(2, 0) +struct lyd_node *nb_op_update_vpathf(struct lyd_node *tree, const char *path_fmt, const char *value, + va_list ap) +{ + struct lyd_node *dnode; + char *path; + + path = darr_vsprintf(path_fmt, ap); + dnode = nb_op_update(tree, path, value); + darr_free(path); + + return dnode; +} + +struct lyd_node *nb_op_update_pathf(struct lyd_node *tree, const char *path_fmt, const char *value, + ...) 
+{ + struct lyd_node *dnode; + va_list ap; + + va_start(ap, value); + dnode = nb_op_update_vpathf(tree, path_fmt, value, ap); + va_end(ap); + + return dnode; +} + +PRINTFRR(2, 0) +void nb_op_update_delete_vpathf(struct lyd_node *tree, const char *path_fmt, va_list ap) +{ + char *path; + + path = darr_vsprintf(path_fmt, ap); + nb_op_update_delete(tree, path); + darr_free(path); +} + +void nb_op_update_delete_pathf(struct lyd_node *tree, const char *path_fmt, ...) +{ + va_list ap; + + va_start(ap, path_fmt); + nb_op_update_delete_vpathf(tree, path_fmt, ap); + va_end(ap); +} + + +PRINTFRR(3, 0) +struct lyd_node *nb_op_vupdatef(struct lyd_node *tree, const char *path, const char *val_fmt, + va_list ap) +{ + struct lyd_node *dnode; + char *value; + + value = darr_vsprintf(val_fmt, ap); + dnode = nb_op_update(tree, path, value); + darr_free(value); + + return dnode; +} + + +struct lyd_node *nb_op_updatef(struct lyd_node *tree, const char *path, const char *val_fmt, ...) +{ + struct lyd_node *dnode; + va_list ap; + + va_start(ap, val_fmt); + dnode = nb_op_vupdatef(tree, path, val_fmt, ap); + va_end(ap); + + return dnode; +} + +static struct op_changes_group *op_changes_group_next(void) +{ + struct op_changes_group *group; + + group = op_changes_queue_pop(&op_changes_queue); + if (!group) { + op_changes_group_push(); + group = op_changes_queue_pop(&op_changes_queue); + } + if (!group) + return NULL; + group->cur_changes = &group->dels; + group->cur_change = RB_MIN(op_changes, group->cur_changes); + if (!group->cur_change) { + group->cur_changes = &group->adds; + group->cur_change = RB_MIN(op_changes, group->cur_changes); + assert(group->cur_change); + } + return group; +} + +/* ---------------------------- */ +/* Query for changes and notify */ +/* ---------------------------- */ + +static void timer_walk_abort(struct nb_notif_walk_args *args); +static void timer_walk_continue(struct event *event); +static void timer_walk_done(struct nb_notif_walk_args *args); + +static struct op_change *__next_change(struct op_changes_group *group) +{ + struct op_change *next = RB_NEXT(op_changes, group->cur_change); + + /* Remove and free current so retry works */ + RB_REMOVE(op_changes, group->cur_changes, group->cur_change); + op_change_free(group->cur_change); + return next; +} + +static struct op_changes_group *__next_group(struct op_changes_group *group) +{ + __dbg("done with oper-path collection for group"); + op_changes_group_free(group); + return op_changes_group_next(); +} + +static enum nb_error oper_walk_done(const struct lyd_node *tree, void *arg, enum nb_error ret) +{ + struct nb_notif_walk_args *args = arg; + struct op_changes_group *group = args->group; + const char *path = group->cur_change->path; + + /* we don't send batches when yielding as we need completed edit in any patch */ + assert(ret != NB_YIELD); + + if (ret == NB_ERR_NOT_FOUND) { + __dbg("Path not found while walking oper tree: %s", path); + ret = NB_OK; + } else if (ret != NB_OK) { +error: + __log_err("Error notifying for datastore path: %s: %s", path, nb_err_name(ret)); + + timer_walk_abort(args); + goto done; + } else { + __dbg("Done with oper-path collection for path: %s", path); + + /* Do we need this? */ + while (tree->parent) + tree = lyd_parent(tree); + + /* Send the add (replace) notification */ + if (mgmt_be_send_ds_replace_notification(path, tree)) { + __log_err("Error sending notification message for path: %s", path); + ret = NB_ERR; + goto error; + } + } + + /* + * Advance to next change. 
+ */ + + group->cur_change = __next_change(group); + if (!group->cur_change) { + args->group = __next_group(group); + if (!args->group) { + timer_walk_done(args); + goto done; + } + } + + /* Run next walk after giving other events a shot to run */ + event_add_timer_msec(nb_notif_master, timer_walk_continue, args, 0, &nb_notif_timer); +done: + /* Done with current walk and scheduled next one if there is more */ + nb_notif_walk = NULL; + + return ret; +} + +static int nb_notify_delete_changes(struct nb_notif_walk_args *args) +{ + struct op_changes_group *group = args->group; + + group->cur_change = RB_MIN(op_changes, group->cur_changes); + while (group->cur_change) { + if (mgmt_be_send_ds_delete_notification(group->cur_change->path)) { + __log_err("Error sending delete notification message for path: %s", + group->cur_change->path); + return 1; + } + group->cur_change = __next_change(group); + } + return 0; +} + +static void timer_walk_continue(struct event *event) +{ + struct nb_notif_walk_args *args = EVENT_ARG(event); + struct op_changes_group *group = args->group; + const char *path; + int ret; + + /* + * Notify about deletes until we have add changes to collect. + */ + while (group->cur_changes == &group->dels) { + ret = nb_notify_delete_changes(args); + if (ret) { + timer_walk_abort(args); + return; + } + + /* after deletes advance to adds */ + group->cur_changes = &group->adds; + group->cur_change = RB_MIN(op_changes, group->cur_changes); + if (group->cur_change) + break; + + args->group = __next_group(group); + if (!args->group) { + timer_walk_done(args); + return; + } + group = args->group; + } + + path = group->cur_change->path; + __dbg("starting next oper-path replace walk for path: %s", path); + nb_notif_walk = nb_oper_walk(path, NULL, 0, false, NULL, NULL, oper_walk_done, args); +} + +static void timer_walk_start(struct event *event) +{ + struct op_changes_group *group; + struct nb_notif_walk_args *args; + + __dbg("oper-state change notification timer fires"); + + group = op_changes_group_next(); + if (!group) { + __dbg("no oper changes to notify"); + return; + } + + args = XCALLOC(MTYPE_NB_NOTIF_WALK_ARGS, sizeof(*args)); + args->group = group; + + EVENT_ARG(event) = args; + timer_walk_continue(event); +} + +static void timer_walk_abort(struct nb_notif_walk_args *args) +{ + __dbg("Failed notifying datastore changes, will retry"); + + __dbg("oper-state notify setting retry timer to fire in: %d msec ", NB_NOTIF_TIMER_MSEC); + event_add_timer_msec(nb_notif_master, timer_walk_continue, args, NB_NOTIF_TIMER_MSEC, + &nb_notif_timer); +} + +static void timer_walk_done(struct nb_notif_walk_args *args) +{ + __dbg("Finished notifying for all datastore changes"); + assert(!args->group); + XFREE(MTYPE_NB_NOTIF_WALK_ARGS, args); +} + +static void nb_notif_set_walk_timer(void) +{ + if (nb_notif_walk) { + __dbg("oper-state walk already in progress."); + return; + } + if (event_is_scheduled(nb_notif_timer)) { + __dbg("oper-state notification timer already set."); + return; + } + + __dbg("oper-state notification setting timer to fire in: %d msec ", NB_NOTIF_TIMER_MSEC); + event_add_timer_msec(nb_notif_master, timer_walk_start, NULL, NB_NOTIF_TIMER_MSEC, + &nb_notif_timer); +} + +void nb_notif_set_filters(const char **selectors, bool replace) +{ + const char **csp; + + if (replace) { + darr_free_free(nb_notif_filters); + nb_notif_filters = selectors; + return; + } + darr_foreach_p (selectors, csp) + *darr_append(nb_notif_filters) = *csp; + darr_free(selectors); +} + +void nb_notif_init(struct 
event_loop *tm) +{ + nb_notif_master = tm; + op_changes_queue_init(&op_changes_queue); +} + +void nb_notif_terminate(void) +{ + struct nb_notif_walk_args *args = nb_notif_timer ? EVENT_ARG(nb_notif_timer) : NULL; + struct op_changes_group *group; + + __dbg("terminating: timer: %p timer arg: %p walk %p", nb_notif_timer, args, nb_notif_walk); + + EVENT_OFF(nb_notif_timer); + + if (nb_notif_walk) { + /* Grab walk args from walk if active. */ + args = nb_oper_walk_finish_arg(nb_notif_walk); + nb_oper_cancel_walk(nb_notif_walk); + nb_notif_walk = NULL; + } + if (args) { + op_changes_group_free(args->group); + XFREE(MTYPE_NB_NOTIF_WALK_ARGS, args); + } + + while ((group = op_changes_group_next())) + op_changes_group_free(group); + + darr_free_free(nb_notif_filters); +} diff --git a/lib/northbound_oper.c b/lib/northbound_oper.c index a3ff360780..6336db502a 100644 --- a/lib/northbound_oper.c +++ b/lib/northbound_oper.c @@ -35,6 +35,7 @@ * We must also process containers with lookup-next descendants last. */ +DEFINE_MTYPE_STATIC(LIB, NB_STATE, "Northbound State"); DEFINE_MTYPE_STATIC(LIB, NB_YIELD_STATE, "NB Yield State"); DEFINE_MTYPE_STATIC(LIB, NB_NODE_INFOS, "NB Node Infos"); @@ -54,6 +55,7 @@ struct nb_op_node_info { struct lyd_node *inner; const struct lysc_node *schema; /* inner schema in case we rm inner */ struct yang_list_keys keys; /* if list, keys to locate element */ + uint position; /* if keyless list, list position */ const void *list_entry; /* opaque entry from user or NULL */ uint xpath_len; /* length of the xpath string for this node */ uint niters; /* # list elems create this iteration */ @@ -232,6 +234,22 @@ static void nb_op_get_keys(struct lyd_node_inner *list_node, keys->num = n; } +static uint nb_op_get_position_predicate(struct nb_op_yield_state *ys, struct nb_op_node_info *ni) +{ + const char *cursor = ys->xpath + ni->xpath_len - 1; + + if (cursor[0] != ']') + return 0; + + while (--cursor > ys->xpath && isdigit(cursor[0])) + ; + + if (cursor[0] != '[') + return 0; + + return atoi(&cursor[1]); +} + /** * __move_back_to_next() - move back to the next lookup-next schema */ @@ -344,7 +362,8 @@ static void nb_op_resume_data_tree(struct nb_op_yield_state *ys) /** * nb_op_xpath_to_trunk() - generate a lyd_node tree (trunk) using an xpath. * @xpath_in: xpath query string to build trunk from. - * @dnode: resulting tree (trunk) + * @xpath_out: resulting xpath for the trunk. + * @trunk: resulting tree (trunk) * * Use the longest prefix of @xpath_in as possible to resolve to a tree (trunk). * This is logically as if we walked along the xpath string resolving each @@ -352,7 +371,7 @@ static void nb_op_resume_data_tree(struct nb_op_yield_state *ys) * * Return: error if any, if no error then @dnode contains the tree (trunk). 
*/ -static enum nb_error nb_op_xpath_to_trunk(const char *xpath_in, +static enum nb_error nb_op_xpath_to_trunk(const char *xpath_in, char **xpath_out, struct lyd_node **trunk) { char *xpath = NULL; @@ -370,7 +389,10 @@ static enum nb_error nb_op_xpath_to_trunk(const char *xpath_in, if (ret != NB_OK) break; } - darr_free(xpath); + if (ret == NB_OK) + *xpath_out = xpath; + else + darr_free(xpath); return ret; } @@ -410,28 +432,57 @@ static enum nb_error nb_op_ys_finalize_node_info(struct nb_op_yield_state *ys, ni->lookup_next_ok = yield_ok && ni->has_lookup_next && (index == 0 || ni[-1].lookup_next_ok); - nb_op_get_keys((struct lyd_node_inner *)inner, &ni->keys); + if (CHECK_FLAG(nn->flags, F_NB_NODE_KEYLESS_LIST)) { + uint i; - /* A list entry cannot be present in a tree w/o it's keys */ - assert(ni->keys.num == yang_snode_num_keys(inner->schema)); + ni->position = nb_op_get_position_predicate(ys, ni); + if (!ni->position) { + flog_warn(EC_LIB_NB_OPERATIONAL_DATA, + "%s: can't decode keyless list positional predicate in %s", + __func__, ys->xpath); + return NB_ERR_NOT_FOUND; + } - /* - * Get this nodes opaque list_entry object - */ + /* + * Get the entry at the position given by the predicate + */ - if (!nn->cbs.lookup_entry) { - flog_warn(EC_LIB_NB_OPERATIONAL_DATA, - "%s: data path doesn't support iteration over operational data: %s", - __func__, ys->xpath); - return NB_ERR_NOT_FOUND; - } + /* ni->list_entry starts as the parent entry of this node */ + ni->list_entry = nb_callback_get_next(nn, ni->list_entry, NULL); + for (i = 1; i < ni->position && ni->list_entry; i++) + ni->list_entry = nb_callback_get_next(nn, ni->list_entry, ni->list_entry); - /* ni->list_entry starts as the parent entry of this node */ - ni->list_entry = nb_callback_lookup_entry(nn, ni->list_entry, &ni->keys); - if (ni->list_entry == NULL) { - flog_warn(EC_LIB_NB_OPERATIONAL_DATA, - "%s: list entry lookup failed", __func__); - return NB_ERR_NOT_FOUND; + if (i != ni->position || !ni->list_entry) { + flog_warn(EC_LIB_NB_OPERATIONAL_DATA, + "%s: entry at position %d doesn't exist in: %s", __func__, + ni->position, ys->xpath); + return NB_ERR_NOT_FOUND; + } + + } else { + nb_op_get_keys((struct lyd_node_inner *)inner, &ni->keys); + /* A list entry cannot be present in a tree w/o it's keys */ + assert(ni->keys.num == yang_snode_num_keys(inner->schema)); + + /* + * Get this nodes opaque list_entry object + */ + + /* We need a lookup entry unless this is a keyless list */ + if (!nn->cbs.lookup_entry && ni->keys.num) { + flog_warn(EC_LIB_NB_OPERATIONAL_DATA, + "%s: data path doesn't support iteration over operational data: %s", + __func__, ys->xpath); + return NB_ERR_NOT_FOUND; + } + + /* ni->list_entry starts as the parent entry of this node */ + ni->list_entry = nb_callback_lookup_entry(nn, ni->list_entry, &ni->keys); + if (ni->list_entry == NULL) { + flog_warn(EC_LIB_NB_OPERATIONAL_DATA, "%s: list entry lookup failed", + __func__); + return NB_ERR_NOT_FOUND; + } } /* @@ -460,8 +511,9 @@ static enum nb_error nb_op_ys_init_node_infos(struct nb_op_yield_state *ys) struct lyd_node *inner; struct lyd_node *node = NULL; enum nb_error ret; - uint i, len; - char *tmp; + const char *cur; + char *xpath = NULL; + uint i, len, prevlen, xplen; /* * Obtain the trunk of the data node tree of the query. @@ -471,8 +523,8 @@ static enum nb_error nb_op_ys_init_node_infos(struct nb_op_yield_state *ys) * node could be identified (e.g., a list-node name with no keys). 
*/ - ret = nb_op_xpath_to_trunk(ys->xpath, &node); - if (ret || !node) { + ret = nb_op_xpath_to_trunk(ys->xpath, &xpath, &node); + if (ret != NB_OK || !node) { flog_warn(EC_LIB_LIBYANG, "%s: can't instantiate concrete path using xpath: %s", __func__, ys->xpath); @@ -482,12 +534,18 @@ static enum nb_error nb_op_ys_init_node_infos(struct nb_op_yield_state *ys) } /* Move up to the container if on a leaf currently. */ - if (node && - !CHECK_FLAG(node->schema->nodetype, LYS_CONTAINER | LYS_LIST)) { + if (!CHECK_FLAG(node->schema->nodetype, LYS_CONTAINER | LYS_LIST)) { struct lyd_node *leaf = node; node = &node->parent->node; + /* Have to trim the leaf from the xpath now */ + ret = yang_xpath_pop_node(xpath); + if (ret != NB_OK) { + darr_free(xpath); + return ret; + } + /* * If the leaf is not a key, delete it, because it has a wrong * empty value. @@ -495,10 +553,7 @@ static enum nb_error nb_op_ys_init_node_infos(struct nb_op_yield_state *ys) if (!lysc_is_key(leaf->schema)) lyd_free_tree(leaf); } - assert(!node || - CHECK_FLAG(node->schema->nodetype, LYS_CONTAINER | LYS_LIST)); - if (!node) - return NB_ERR_NOT_FOUND; + assert(CHECK_FLAG(node->schema->nodetype, LYS_CONTAINER | LYS_LIST)); inner = node; for (len = 1; inner->parent; len++) @@ -511,26 +566,42 @@ static enum nb_error nb_op_ys_init_node_infos(struct nb_op_yield_state *ys) * -- save the prefix length. */ inner = node; + prevlen = 0; + xplen = strlen(xpath); + darr_free(xpath); for (i = len; i > 0; i--, inner = &inner->parent->node) { ni = &ys->node_infos[i - 1]; ni->inner = inner; ni->schema = inner->schema; + + if (i == len) { + prevlen = xplen; + ni->xpath_len = prevlen; + continue; + } + /* - * NOTE: we could build this by hand with a litte more effort, - * but this simple implementation works and won't be expensive - * since the number of nodes is small and only done once per - * query. + * The only predicates we should have are concrete ones at this + * point b/c of nb_op_xpath_to_trunk() above, so we aren't in + * danger of finding a division symbol in the path, only '/'s + * inside strings which frrstr_back_to_char skips over. 
*/ - tmp = yang_dnode_get_path(inner, NULL, 0); - ni->xpath_len = strlen(tmp); - /* Replace users supplied xpath with the libyang returned value */ - if (i == len) - darr_in_strdup(ys->xpath, tmp); + assert(prevlen == xplen || ys->xpath[prevlen] == '/'); + if (prevlen != xplen) + ys->xpath[prevlen] = 0; + cur = frrstr_back_to_char(ys->xpath, '/'); + if (prevlen != xplen) + ys->xpath[prevlen] = '/'; + + if (!cur || cur == ys->xpath) { + flog_warn(EC_LIB_LIBYANG, "%s: error tokenizing query xpath: %s", __func__, + ys->xpath); + return NB_ERR_VALIDATION; + } - /* The prefix must match the prefix of the stored xpath */ - assert(!strncmp(tmp, ys->xpath, ni->xpath_len)); - free(tmp); + prevlen = cur - ys->xpath; + ni->xpath_len = prevlen; } /* @@ -584,6 +655,10 @@ static enum nb_error nb_op_iter_leaf(struct nb_op_yield_state *ys, if (lysc_is_key(snode)) return NB_OK; + /* Check for new simple get */ + if (nb_node->cbs.get) + return nb_node->cbs.get(nb_node, ni->list_entry, ni->inner); + data = nb_callback_get_elem(nb_node, xpath, ni->list_entry); if (data == NULL) return NB_OK; @@ -617,6 +692,10 @@ static enum nb_error nb_op_iter_leaflist(struct nb_op_yield_state *ys, if (CHECK_FLAG(snode->flags, LYS_CONFIG_W)) return NB_OK; + /* Check for new simple get */ + if (nb_node->cbs.get) + return nb_node->cbs.get(nb_node, ni->list_entry, ni->inner); + do { struct yang_data *data; @@ -1483,17 +1562,13 @@ static void nb_op_walk_continue(struct event *thread) ret = __walk(ys, true); if (ret == NB_YIELD) { - if (nb_op_yield(ys) != NB_OK) { - if (ys->should_batch) - goto stopped; - else - goto finish; - } - return; + ret = nb_op_yield(ys); + if (ret == NB_OK) + return; } finish: + assert(ret != NB_YIELD); (*ys->finish)(ys_root_node(ys), ys->finish_arg, ret); -stopped: nb_op_free_yield_state(ys, false); } @@ -1552,6 +1627,13 @@ static void nb_op_trim_yield_state(struct nb_op_yield_state *ys) (int)darr_lasti(ys->node_infos)); } +/** + * nb_op_yield() - Yield during the walk. + * @ys: the yield state tracking the walk. + * + * Return: Any error from the `ys->finish` callback which should terminate the + * walk. Otherwise if `ys->should_batch` == false always returns NB_OK. 
+ */ static enum nb_error nb_op_yield(struct nb_op_yield_state *ys) { enum nb_error ret; @@ -1752,6 +1834,20 @@ bool nb_oper_is_yang_lib_query(const char *xpath) return strlen(xpath) > liblen; } +void *nb_oper_walk_finish_arg(void *walk) +{ + struct nb_op_yield_state *ys = walk; + + return ys->finish_arg; +} + +void *nb_oper_walk_cb_arg(void *walk) +{ + struct nb_op_yield_state *ys = walk; + + return ys->cb_arg; +} + void *nb_oper_walk(const char *xpath, struct yang_translator *translator, uint32_t flags, bool should_batch, nb_oper_data_cb cb, void *cb_arg, nb_oper_data_finish_cb finish, void *finish_arg) @@ -1764,17 +1860,13 @@ void *nb_oper_walk(const char *xpath, struct yang_translator *translator, ret = nb_op_walk_start(ys); if (ret == NB_YIELD) { - if (nb_op_yield(ys) != NB_OK) { - if (ys->should_batch) - goto stopped; - else - goto finish; - } - return ys; + ret = nb_op_yield(ys); + if (ret == NB_OK) + return ys; } -finish: + + assert(ret != NB_YIELD); (void)(*ys->finish)(ys_root_node(ys), ys->finish_arg, ret); -stopped: nb_op_free_yield_state(ys, false); return NULL; } @@ -1826,6 +1918,87 @@ enum nb_error nb_oper_iterate_legacy(const char *xpath, return ret; } +static const char *__adjust_ptr(struct lysc_node_leaf *lsnode, const char *valuep, size_t *size) +{ + switch (lsnode->type->basetype) { + case LY_TYPE_INT8: + case LY_TYPE_UINT8: +#ifdef BIG_ENDIAN + valuep += 7; +#endif + *size = 1; + break; + case LY_TYPE_INT16: + case LY_TYPE_UINT16: +#ifdef BIG_ENDIAN + valuep += 6; +#endif + *size = 2; + break; + case LY_TYPE_INT32: + case LY_TYPE_UINT32: +#ifdef BIG_ENDIAN + valuep += 4; +#endif + *size = 4; + break; + case LY_TYPE_INT64: + case LY_TYPE_UINT64: + *size = 8; + break; + case LY_TYPE_UNKNOWN: + case LY_TYPE_BINARY: + case LY_TYPE_STRING: + case LY_TYPE_BITS: + case LY_TYPE_BOOL: + case LY_TYPE_DEC64: + case LY_TYPE_EMPTY: + case LY_TYPE_ENUM: + case LY_TYPE_IDENT: + case LY_TYPE_INST: + case LY_TYPE_LEAFREF: + case LY_TYPE_UNION: + default: + assert(0); + } + return valuep; +} + +enum nb_error nb_oper_uint64_get(const struct nb_node *nb_node, const void *parent_list_entry, + struct lyd_node *parent) +{ + struct lysc_node_leaf *lsnode = (struct lysc_node_leaf *)nb_node->snode; + struct lysc_node *snode = &lsnode->node; + ssize_t offset = (ssize_t)nb_node->cbs.get_elem; + uint64_t ubigval = *(uint64_t *)((char *)parent_list_entry + offset); + const char *valuep; + size_t size; + + valuep = __adjust_ptr(lsnode, (const char *)&ubigval, &size); + if (lyd_new_term_bin(parent, snode->module, snode->name, valuep, size, LYD_NEW_PATH_UPDATE, + NULL)) + return NB_ERR_RESOURCE; + return NB_OK; +} + + +enum nb_error nb_oper_uint32_get(const struct nb_node *nb_node, const void *parent_list_entry, + struct lyd_node *parent) +{ + struct lysc_node_leaf *lsnode = (struct lysc_node_leaf *)nb_node->snode; + struct lysc_node *snode = &lsnode->node; + ssize_t offset = (ssize_t)nb_node->cbs.get_elem; + uint64_t ubigval = *(uint64_t *)((char *)parent_list_entry + offset); + const char *valuep; + size_t size; + + valuep = __adjust_ptr(lsnode, (const char *)&ubigval, &size); + if (lyd_new_term_bin(parent, snode->module, snode->name, valuep, size, LYD_NEW_PATH_UPDATE, + NULL)) + return NB_ERR_RESOURCE; + return NB_OK; +} + void nb_oper_init(struct event_loop *loop) { event_loop = loop; diff --git a/lib/plist.c b/lib/plist.c index 6950ab5761..713eee25ed 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -1536,7 +1536,6 @@ int prefix_bgp_show_prefix_list(struct vty *vty, afi_t afi, char *name, if 
(use_json) { json = json_object_new_object(); json_prefix = json_object_new_object(); - json_list = json_object_new_object(); json_object_int_add(json_prefix, "prefixListCounter", plist->count); @@ -1544,10 +1543,7 @@ int prefix_bgp_show_prefix_list(struct vty *vty, afi_t afi, char *name, plist->name); for (pentry = plist->head; pentry; pentry = pentry->next) { - struct prefix *p = &pentry->prefix; - char buf_a[BUFSIZ]; - - snprintf(buf_a, sizeof(buf_a), "%pFX", p); + json_list = json_object_new_object(); json_object_int_add(json_list, "seq", pentry->seq); json_object_string_add(json_list, "seqPrefixListType", @@ -1560,7 +1556,7 @@ int prefix_bgp_show_prefix_list(struct vty *vty, afi_t afi, char *name, json_object_int_add(json_list, "le", pentry->le); - json_object_object_add(json_prefix, buf_a, json_list); + json_object_object_addf(json_prefix, json_list, "%pFX", &pentry->prefix); } if (afi == AFI_IP) json_object_object_add(json, "ipPrefixList", diff --git a/lib/privs.c b/lib/privs.c index 717a2e48d6..e7df383e5d 100644 --- a/lib/privs.c +++ b/lib/privs.c @@ -179,7 +179,7 @@ static pset_t *zcaps2sys(zebra_capabilities_t *zcaps, int num) for (i = 0; i < num; i++) count += cap_map[zcaps[i]].num; - if ((syscaps = XCALLOC(MTYPE_PRIVS, (sizeof(pset_t) * num))) == NULL) { + if ((syscaps = XCALLOC(MTYPE_PRIVS, sizeof(pset_t))) == NULL) { fprintf(stderr, "%s: could not allocate syscaps!", __func__); return NULL; } @@ -210,10 +210,11 @@ int zprivs_change_caps(zebra_privs_ops_t op) { cap_flag_value_t cflag; - /* should be no possibility of being called without valid caps */ - assert(zprivs_state.syscaps_p && zprivs_state.caps); - if (!(zprivs_state.syscaps_p && zprivs_state.caps)) - exit(1); + /* Called without valid caps - just return. Not every daemon needs + * privs. 
+ */ + if (zprivs_state.syscaps_p == NULL || zprivs_state.caps == NULL) + return 0; if (op == ZPRIVS_RAISE) cflag = CAP_SET; diff --git a/lib/route_types.txt b/lib/route_types.txt index 93cbc36e97..b5f8b6fdf3 100644 --- a/lib/route_types.txt +++ b/lib/route_types.txt @@ -88,7 +88,7 @@ ZEBRA_ROUTE_VRRP, vrrp, vrrpd, '-', 0, 0, 0, "VRRP", vr ZEBRA_ROUTE_NHG, zebra, none, '-', 0, 0, 0, "Nexthop Group", none ZEBRA_ROUTE_SRTE, srte, none, '-', 0, 0, 0, "SR-TE", none ZEBRA_ROUTE_TABLE_DIRECT, table-direct, zebra, 't', 1, 1, 1, "Table-Direct", zebra -ZEBRA_ROUTE_ALL, wildcard, none, '-', 0, 0, 0, "-", none +ZEBRA_ROUTE_ALL, any, none, '-', 0, 0, 0, "-", none ## help strings diff --git a/lib/routemap.h b/lib/routemap.h index 8dcc17ecc3..1c02348313 100644 --- a/lib/routemap.h +++ b/lib/routemap.h @@ -310,6 +310,7 @@ DECLARE_QOBJ_TYPE(route_map); (strmatch(C, "frr-bgp-route-map:ip-route-source")) #define IS_MATCH_ROUTE_SRC_PL(C) \ (strmatch(C, "frr-bgp-route-map:ip-route-source-prefix-list")) +#define IS_MATCH_COMMUNITY_LIMIT(C) (strmatch(C, "frr-bgp-route-map:match-community-limit")) #define IS_MATCH_COMMUNITY(C) \ (strmatch(C, "frr-bgp-route-map:match-community")) #define IS_MATCH_LCOMMUNITY(C) \ diff --git a/lib/routemap_cli.c b/lib/routemap_cli.c index 69b942064b..eb01709707 100644 --- a/lib/routemap_cli.c +++ b/lib/routemap_cli.c @@ -810,6 +810,10 @@ void route_map_condition_show(struct vty *vty, const struct lyd_node *dnode, yang_dnode_get_string( dnode, "./rmap-match-condition/frr-bgp-route-map:list-name")); + } else if (IS_MATCH_COMMUNITY_LIMIT(condition)) { + vty_out(vty, " match community-limit %s\n", + yang_dnode_get_string(dnode, + "./rmap-match-condition/frr-bgp-route-map:community-limit")); } else if (IS_MATCH_COMMUNITY(condition)) { vty_out(vty, " match community %s", yang_dnode_get_string( diff --git a/lib/srcdest_table.c b/lib/srcdest_table.c index 3247a0372c..7203c8ac8e 100644 --- a/lib/srcdest_table.c +++ b/lib/srcdest_table.c @@ -309,13 +309,3 @@ static ssize_t printfrr_rn(struct fbuf *buf, struct printfrr_eargs *ea, cbuf, sizeof(cbuf)); return bputs(buf, cbuf); } - -struct route_table *srcdest_srcnode_table(struct route_node *rn) -{ - if (rnode_is_dstnode(rn)) { - struct srcdest_rnode *srn = srcdest_rnode_from_rnode(rn); - - return srn->src_table; - } - return NULL; -} diff --git a/lib/srcdest_table.h b/lib/srcdest_table.h index ff97f9b735..a699d4a11b 100644 --- a/lib/srcdest_table.h +++ b/lib/srcdest_table.h @@ -87,8 +87,6 @@ static inline void *srcdest_rnode_table_info(struct route_node *rn) return route_table_get_info(srcdest_rnode_table(rn)); } -extern struct route_table *srcdest_srcnode_table(struct route_node *rn); - #ifdef __cplusplus } #endif diff --git a/lib/srv6.h b/lib/srv6.h index 9a041e3d85..7e4fb97ad1 100644 --- a/lib/srv6.h +++ b/lib/srv6.h @@ -22,6 +22,8 @@ #define SRV6_SID_FORMAT_NAME_SIZE 512 +#define DEFAULT_SRV6_IFNAME "sr0" + #ifdef __cplusplus extern "C" { #endif @@ -186,6 +188,42 @@ enum srv6_endpoint_behavior_codepoint { SRV6_ENDPOINT_BEHAVIOR_OPAQUE = 0xFFFF, }; +/* + * Convert SRv6 endpoint behavior codepoints to human-friendly string. 
+ */ +static inline const char * +srv6_endpoint_behavior_codepoint2str(enum srv6_endpoint_behavior_codepoint behavior) +{ + switch (behavior) { + case SRV6_ENDPOINT_BEHAVIOR_RESERVED: + return "Reserved"; + case SRV6_ENDPOINT_BEHAVIOR_END: + return "End"; + case SRV6_ENDPOINT_BEHAVIOR_END_X: + return "End.X"; + case SRV6_ENDPOINT_BEHAVIOR_END_DT6: + return "End.DT6"; + case SRV6_ENDPOINT_BEHAVIOR_END_DT4: + return "End.DT4"; + case SRV6_ENDPOINT_BEHAVIOR_END_DT46: + return "End.DT46"; + case SRV6_ENDPOINT_BEHAVIOR_END_NEXT_CSID: + return "uN"; + case SRV6_ENDPOINT_BEHAVIOR_END_X_NEXT_CSID: + return "uA"; + case SRV6_ENDPOINT_BEHAVIOR_END_DT6_USID: + return "uDT6"; + case SRV6_ENDPOINT_BEHAVIOR_END_DT4_USID: + return "uDT4"; + case SRV6_ENDPOINT_BEHAVIOR_END_DT46_USID: + return "uDT46"; + case SRV6_ENDPOINT_BEHAVIOR_OPAQUE: + return "Opaque"; + } + + return "Unspec"; +} + struct nexthop_srv6 { /* SRv6 localsid info for Endpoint-behaviour */ enum seg6local_action_t seg6local_action; diff --git a/lib/subdir.am b/lib/subdir.am index 4bcce9a2b0..a975eb2fc4 100644 --- a/lib/subdir.am +++ b/lib/subdir.am @@ -84,6 +84,7 @@ lib_libfrr_la_SOURCES = \ lib/northbound.c \ lib/northbound_cli.c \ lib/northbound_db.c \ + lib/northbound_notif.c \ lib/northbound_oper.c \ lib/ntop.c \ lib/openbsd-tree.c \ @@ -144,6 +145,7 @@ lib_libfrr_la_SOURCES = \ nodist_lib_libfrr_la_SOURCES = \ yang/frr-affinity-map.yang.c \ + yang/frr-backend.yang.c \ yang/frr-filter.yang.c \ yang/frr-if-rmap.yang.c \ yang/frr-interface.yang.c \ @@ -22,6 +22,9 @@ #include "northbound.h" #include "northbound_cli.h" +/* Set by the owner (zebra). */ +bool vrf_notify_oper_changes; + /* default VRF name value used when VRF backend is not NETNS */ #define VRF_DEFAULT_NAME_INTERNAL "default" @@ -39,8 +42,7 @@ RB_GENERATE(vrf_name_head, vrf, name_entry, vrf_name_compare); struct vrf_id_head vrfs_by_id = RB_INITIALIZER(&vrfs_by_id); struct vrf_name_head vrfs_by_name = RB_INITIALIZER(&vrfs_by_name); -static int vrf_backend; -static int vrf_backend_configured; +static int vrf_backend = VRF_BACKEND_VRF_LITE; static char vrf_default_name[VRF_NAMSIZ] = VRF_DEFAULT_NAME_INTERNAL; /* @@ -105,6 +107,19 @@ int vrf_switchback_to_initial(void) return ret; } +static void vrf_update_state(struct vrf *vrf) +{ + if (!vrf->state || !vrf_notify_oper_changes) + return; + + /* + * Remove top level container update when we have patch support, for now + * this keeps us from generating 2 separate REPLACE messages though. + */ + nb_op_updatef(vrf->state, "id", "%u", vrf->vrf_id); + nb_op_update(vrf->state, "active", CHECK_FLAG(vrf->status, VRF_ACTIVE) ? "true" : "false"); +} + /* Get a VRF. If not found, create one. * Arg: * name - The name of the vrf. May be NULL if unknown. 
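[Editor's note, not part of the patch: a usage sketch of how the new oper-state helpers used by vrf_update_state(), vrf_get() and vrf_delete() above fit together. The VRF name and values are hypothetical.]

	/* Sketch: publish state for a hypothetical VRF "blue", then retract it. */
	struct lyd_node *state;

	/* Create the state subtree and keep its root, as vrf_get() does. */
	state = nb_op_update_pathf(NULL, "/frr-vrf:lib/vrf[name=\"%s\"]/state", NULL, "blue");

	/* Update individual leaves; each call also queues a change notification. */
	nb_op_updatef(state, "id", "%u", 42u);
	nb_op_update(state, "active", "true");

	/* On deletion, retract the whole vrf entry and free the local tree,
	 * mirroring vrf_delete() above.
	 */
	nb_op_update_delete_pathf(NULL, "/frr-vrf:lib/vrf[name=\"%s\"]", "blue");
	lyd_free_all(state);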
@@ -155,16 +170,32 @@ struct vrf *vrf_get(vrf_id_t vrf_id, const char *name) /* Set name */ if (name && vrf->name[0] != '\0' && strcmp(name, vrf->name)) { - /* update the vrf name */ + /* vrf name has changed */ + if (vrf_notify_oper_changes) { + nb_op_update_delete_pathf(NULL, "/frr-vrf:lib/vrf[name=\"%s\"]", vrf->name); + lyd_free_all(vrf->state); + } RB_REMOVE(vrf_name_head, &vrfs_by_name, vrf); - strlcpy(vrf->data.l.netns_name, - name, NS_NAMSIZ); + strlcpy(vrf->data.l.netns_name, name, NS_NAMSIZ); strlcpy(vrf->name, name, sizeof(vrf->name)); RB_INSERT(vrf_name_head, &vrfs_by_name, vrf); + /* New state with new name */ + if (vrf_notify_oper_changes) + vrf->state = nb_op_update_pathf(NULL, "/frr-vrf:lib/vrf[name=\"%s\"]/state", + NULL, vrf->name); } else if (name && vrf->name[0] == '\0') { strlcpy(vrf->name, name, sizeof(vrf->name)); RB_INSERT(vrf_name_head, &vrfs_by_name, vrf); + + /* We have a name now so we can have state */ + if (vrf_notify_oper_changes) + vrf->state = nb_op_update_pathf(NULL, "/frr-vrf:lib/vrf[name=\"%s\"]/state", + NULL, vrf->name); } + /* Update state before hook call */ + if (vrf->state) + vrf_update_state(vrf); + if (new &&vrf_master.vrf_new_hook) (*vrf_master.vrf_new_hook)(vrf); @@ -208,6 +239,7 @@ struct vrf *vrf_update(vrf_id_t new_vrf_id, const char *name) vrf->vrf_id = new_vrf_id; RB_INSERT(vrf_id_head, &vrfs_by_id, vrf); + vrf_update_state(vrf); } else { /* @@ -254,6 +286,11 @@ void vrf_delete(struct vrf *vrf) if (vrf->name[0] != '\0') RB_REMOVE(vrf_name_head, &vrfs_by_name, vrf); + if (vrf_notify_oper_changes) { + nb_op_update_delete_pathf(NULL, "/frr-vrf:lib/vrf[name=\"%s\"]", vrf->name); + lyd_free_all(vrf->state); + } + XFREE(MTYPE_VRF, vrf); } @@ -282,6 +319,8 @@ int vrf_enable(struct vrf *vrf) SET_FLAG(vrf->status, VRF_ACTIVE); + vrf_update_state(vrf); + if (vrf_master.vrf_enable_hook) (*vrf_master.vrf_enable_hook)(vrf); @@ -307,6 +346,8 @@ void vrf_disable(struct vrf *vrf) UNSET_FLAG(vrf->status, VRF_ACTIVE); + vrf_update_state(vrf); + if (debug_vrf) zlog_debug("VRF %s(%u) is to be disabled.", vrf->name, vrf->vrf_id); @@ -540,15 +581,6 @@ void vrf_init(int (*create)(struct vrf *), int (*enable)(struct vrf *), "vrf_init: failed to create the default VRF!"); exit(1); } - if (vrf_is_backend_netns()) { - struct ns *ns; - - strlcpy(default_vrf->data.l.netns_name, - VRF_DEFAULT_NAME, NS_NAMSIZ); - ns = ns_lookup(NS_DEFAULT); - ns->vrf_ctxt = default_vrf; - default_vrf->ns_ctxt = ns; - } /* Enable the default VRF. 
*/ if (!vrf_enable(default_vrf)) { @@ -612,8 +644,6 @@ int vrf_is_backend_netns(void) int vrf_get_backend(void) { - if (!vrf_backend_configured) - return VRF_BACKEND_UNKNOWN; return vrf_backend; } @@ -621,7 +651,6 @@ int vrf_configure_backend(enum vrf_backend_type backend) { /* Work around issue in old gcc */ switch (backend) { - case VRF_BACKEND_UNKNOWN: case VRF_BACKEND_NETNS: case VRF_BACKEND_VRF_LITE: break; @@ -630,7 +659,6 @@ int vrf_configure_backend(enum vrf_backend_type backend) } vrf_backend = backend; - vrf_backend_configured = 1; return 0; } @@ -80,6 +80,8 @@ struct vrf { /* Back pointer to namespace context */ void *ns_ctxt; + struct lyd_node *state; + QOBJ_FIELDS; }; RB_HEAD(vrf_id_head, vrf); @@ -92,7 +94,6 @@ DECLARE_QOBJ_TYPE(vrf); enum vrf_backend_type { VRF_BACKEND_VRF_LITE, VRF_BACKEND_NETNS, - VRF_BACKEND_UNKNOWN, VRF_BACKEND_MAX, }; @@ -299,6 +300,7 @@ extern void vrf_disable(struct vrf *vrf); extern int vrf_enable(struct vrf *vrf); extern void vrf_delete(struct vrf *vrf); +extern bool vrf_notify_oper_changes; extern const struct frr_yang_module_info frr_vrf_info; extern const struct frr_yang_module_info frr_vrf_cli_info; diff --git a/lib/yang.c b/lib/yang.c index b847b8b77b..dd48d8861b 100644 --- a/lib/yang.c +++ b/lib/yang.c @@ -14,6 +14,7 @@ #include <libyang/version.h> #include "northbound.h" #include "frrstr.h" +#include "darr.h" #include "lib/config_paths.h" @@ -680,6 +681,116 @@ void yang_dnode_rpc_output_add(struct lyd_node *output, const char *xpath, assert(err == LY_SUCCESS); } +struct lyd_node *yang_state_new(struct lyd_node *tree, const char *path, const char *value) +{ + struct lyd_node *dnode, *parent; + LY_ERR err; + + err = lyd_new_path2(tree, ly_native_ctx, path, value, 0, 0, LYD_NEW_PATH_UPDATE, &parent, + &dnode); + assert(err == LY_SUCCESS); + + /* + * If the node exists and isn't updated returned dnode will be NULL, so + * we need to find it. But even if returned it can be the first newly + * created node (could be container of path) not the actual path dnode. + * So we always find. + */ + err = lyd_find_path(tree ?: parent, path, false, &dnode); + assert(err == LY_SUCCESS); + + return dnode; +} + +void yang_state_delete(struct lyd_node *tree, const char *path) +{ + LY_ERR err; + + if (!tree) + return; + + if (path) { + err = lyd_find_path(tree, path, false, &tree); + if (err != LY_SUCCESS) { + zlog_info("State %s has already been deleted", path); + return; + } + } + lyd_free_tree(tree); +} + +PRINTFRR(2, 0) +struct lyd_node *yang_state_new_vpathf(struct lyd_node *tree, const char *path_fmt, + const char *value, va_list ap) +{ + struct lyd_node *dnode; + char *path; + + path = darr_vsprintf(path_fmt, ap); + dnode = yang_state_new(tree, path, value); + darr_free(path); + + return dnode; +} + +struct lyd_node *yang_state_new_pathf(struct lyd_node *tree, const char *path_fmt, + const char *value, ...) +{ + struct lyd_node *dnode; + va_list ap; + + va_start(ap, value); + dnode = yang_state_new_vpathf(tree, path_fmt, value, ap); + va_end(ap); + + return dnode; +} + +PRINTFRR(2, 0) +void yang_state_delete_vpathf(struct lyd_node *tree, const char *path_fmt, va_list ap) +{ + char *path; + + path = darr_vsprintf(path_fmt, ap); + yang_state_delete(tree, path); + darr_free(path); +} + +void yang_state_delete_pathf(struct lyd_node *tree, const char *path_fmt, ...) 
+{ + va_list ap; + + va_start(ap, path_fmt); + yang_state_delete_vpathf(tree, path_fmt, ap); + va_end(ap); +} + +PRINTFRR(3, 0) +struct lyd_node *yang_state_vnewf(struct lyd_node *tree, const char *path, const char *val_fmt, + va_list ap) +{ + struct lyd_node *dnode; + char *value; + + value = darr_vsprintf(val_fmt, ap); + dnode = yang_state_new(tree, path, value); + darr_free(value); + + return dnode; +} + +struct lyd_node *yang_state_newf(struct lyd_node *tree, const char *path, const char *val_fmt, ...) +{ + struct lyd_node *dnode; + va_list ap; + + va_start(ap, val_fmt); + dnode = yang_state_vnewf(tree, path, val_fmt, ap); + va_end(ap); + + return dnode; +} + struct yang_data *yang_data_new(const char *xpath, const char *value) { struct yang_data *data; @@ -763,6 +874,60 @@ static void ly_zlog_cb(LY_LOG_LEVEL level, const char *msg, const char *data_pat zlog(priority, "libyang: %s", msg); } +LY_ERR yang_parse_data(const char *xpath, LYD_FORMAT format, bool as_subtree, bool is_oper, + bool validate, const char *data, struct lyd_node **tree) +{ + struct ly_in *in = NULL; + struct lyd_node *subtree = NULL; + uint32_t parse_options = LYD_PARSE_STRICT | LYD_PARSE_ONLY; + uint32_t validate_options = LYD_VALIDATE_PRESENT; + LY_ERR err; + + err = ly_in_new_memory(data, &in); + if (err != LY_SUCCESS) + return err; + + if (as_subtree) { + struct lyd_node *parent; + + /* + * Create the subtree branch from root using the xpath. This + * will be used below to parse the data rooted at the subtree -- + * a common YANG JSON technique (vs XML which starts all + * data trees from the root). + */ + err = lyd_new_path2(NULL, ly_native_ctx, xpath, NULL, 0, 0, 0, &parent, &subtree); + if (err != LY_SUCCESS) + goto done; + err = lyd_find_path(parent, xpath, false, &subtree); + if (err != LY_SUCCESS) + goto done; + } + + if (is_oper) + validate_options |= LYD_VALIDATE_OPERATIONAL; + +#ifdef LYD_VALIDATE_NOT_FINAL + if (!validate) + validate_options |= LYD_VALIDATE_NOT_FINAL; +#endif + + err = lyd_parse_data(ly_native_ctx, subtree, in, format, parse_options, validate_options, + tree); + if (err == LY_SUCCESS && subtree) + *tree = subtree; +done: + ly_in_free(in, 0); + if (err != LY_SUCCESS) { + if (*tree) + lyd_free_all(*tree); + else if (subtree) + lyd_free_all(subtree); + *tree = NULL; + } + return err; +} + LY_ERR yang_parse_notification(const char *xpath, LYD_FORMAT format, const char *data, struct lyd_node **notif) { diff --git a/lib/yang.h b/lib/yang.h index 52857ecf00..748f089037 100644 --- a/lib/yang.h +++ b/lib/yang.h @@ -535,6 +535,66 @@ extern struct lyd_node *yang_dnode_dup(const struct lyd_node *dnode); */ extern void yang_dnode_free(struct lyd_node *dnode); +/** + * yang_state_new() - Create new state data. + * @tree: subtree @path is relative to or NULL in which case @path must be + * absolute. + * @path: The path of the state node to create. + * @value: The canonical value of the state. + * + * Return: The new libyang node. + */ +extern struct lyd_node *yang_state_new(struct lyd_node *tree, const char *path, const char *value); + +/** + * yang_state_delete() - Delete state data. + * @tree: subtree @path is relative to or NULL in which case @path must be + * absolute. + * @path: The path of the state node to delete, or NULL if @tree should just be + * deleted. + */ +extern void yang_state_delete(struct lyd_node *tree, const char *path); + +/** + * yang_state_new_pathf() - Create new state data. + * @tree: subtree @path_fmt is relative to or NULL in which case @path_fmt must + * be absolute. 
+ * @path_fmt: The path format string of the state node to create. + * @value: The canonical value of the state. + * @...: The values to substitute into @path_fmt. + * + * Return: The new libyang node. + */ +extern struct lyd_node *yang_state_new_pathf(struct lyd_node *tree, const char *path_fmt, + const char *value, ...) PRINTFRR(2, 4); +extern struct lyd_node *yang_state_new_vpathf(struct lyd_node *tree, const char *path_fmt, + const char *value, va_list ap); +/** + * yang_state_delete_pathf() - Delete state data. + * @tree: subtree @path_fmt is relative to or NULL in which case @path_fmt must + * be absolute. + * @path: The path of the state node to delete. + * @...: The values to substitute into @path_fmt. + */ +extern void yang_state_delete_pathf(struct lyd_node *tree, const char *path_fmt, ...) PRINTFRR(2, 3); +extern void yang_state_delete_vpathf(struct lyd_node *tree, const char *path_fmt, va_list ap); + +/** + * yang_state_newf() - Create new state data. + * @tree: subtree @path is relative to or NULL in which case @path must be + * absolute. + * @path: The path of the state node to create. + * @val_fmt: The value format string to set the canonical value of the state. + * @...: The values to substitute into @val_fmt. + * + * Return: The new libyang node. + */ +extern struct lyd_node *yang_state_newf(struct lyd_node *tree, const char *path, + const char *val_fmt, ...) PRINTFRR(3, 4); + +extern struct lyd_node *yang_state_vnewf(struct lyd_node *tree, const char *path, + const char *val_fmt, va_list ap); + /* * Add a libyang data node to an RPC/action output container. * @@ -621,6 +681,25 @@ extern struct ly_ctx *yang_ctx_new_setup(bool embedded_modules, bool explicit_co */ extern void yang_debugging_set(bool enable); + +/* + * Parse YANG data. + * + * Args: + * xpath: xpath of the data. + * format: LYD_FORMAT of input data. + * as_subtree: parse the data as starting at the subtree identified by xpath. + * is_oper: parse as operational state allows for invalid (logs warning). + * validate: validate the data (otherwise treat as non-final). + * data: input data. + * notif: pointer to the libyang data tree to store the parsed notification. + * If the notification is not on the top level of the yang model, + * the pointer to the notification node is still returned, but it's + * part of the full data tree with all its parents. + */ +LY_ERR yang_parse_data(const char *xpath, LYD_FORMAT format, bool as_subtree, bool is_oper, + bool validate, const char *data, struct lyd_node **tree); + /* * Parse a YANG notification. * diff --git a/lib/zclient.c b/lib/zclient.c index 063944fd3b..d8c75c9029 100644 --- a/lib/zclient.c +++ b/lib/zclient.c @@ -31,6 +31,7 @@ DEFINE_MTYPE_STATIC(LIB, ZCLIENT, "Zclient"); DEFINE_MTYPE_STATIC(LIB, REDIST_INST, "Redistribution instance IDs"); +DEFINE_MTYPE_STATIC(LIB, REDIST_TABLE_DIRECT, "Redistribution table direct"); /* Zebra client events. 
*/ enum zclient_event { ZCLIENT_SCHEDULE, ZCLIENT_READ, ZCLIENT_CONNECT }; @@ -104,6 +105,11 @@ void zclient_free(struct zclient *zclient) XFREE(MTYPE_ZCLIENT, zclient); } +static void redist_free_instance(void *data) +{ + XFREE(MTYPE_REDIST_INST, data); +} + unsigned short *redist_check_instance(struct redist_proto *red, unsigned short instance) { @@ -126,8 +132,10 @@ void redist_add_instance(struct redist_proto *red, unsigned short instance) red->enabled = 1; - if (!red->instances) + if (!red->instances) { red->instances = list_new(); + red->instances->del = redist_free_instance; + } in = XMALLOC(MTYPE_REDIST_INST, sizeof(unsigned short)); *in = instance; @@ -143,23 +151,100 @@ void redist_del_instance(struct redist_proto *red, unsigned short instance) return; listnode_delete(red->instances, id); - XFREE(MTYPE_REDIST_INST, id); + red->instances->del(id); if (!red->instances->count) { red->enabled = 0; list_delete(&red->instances); } } -void redist_del_all_instances(struct redist_proto *red) +static void redist_free_table_direct(void *data) { - struct listnode *ln, *nn; - unsigned short *id; + XFREE(MTYPE_REDIST_TABLE_DIRECT, data); +} + +struct redist_table_direct *redist_lookup_table_direct(const struct redist_proto *red, + const struct redist_table_direct *table) +{ + struct redist_table_direct *ntable; + struct listnode *node; + + if (red->instances == NULL) + return NULL; + + for (ALL_LIST_ELEMENTS_RO(red->instances, node, ntable)) { + if (table->vrf_id != ntable->vrf_id) + continue; + if (table->table_id != ntable->table_id) + continue; + + return ntable; + } + + return NULL; +} + +bool redist_table_direct_has_id(const struct redist_proto *red, int table_id) +{ + struct redist_table_direct *table; + struct listnode *node; + + if (red->instances == NULL) + return false; + + for (ALL_LIST_ELEMENTS_RO(red->instances, node, table)) { + if (table->table_id != table_id) + continue; + + return true; + } + + return false; +} + +void redist_add_table_direct(struct redist_proto *red, const struct redist_table_direct *table) +{ + struct redist_table_direct *ntable; + + ntable = redist_lookup_table_direct(red, table); + if (ntable != NULL) + return; + + if (red->instances == NULL) { + red->instances = list_new(); + red->instances->del = redist_free_table_direct; + } + + red->enabled = 1; + + ntable = XCALLOC(MTYPE_REDIST_TABLE_DIRECT, sizeof(*ntable)); + ntable->vrf_id = table->vrf_id; + ntable->table_id = table->table_id; + listnode_add(red->instances, ntable); +} + +void redist_del_table_direct(struct redist_proto *red, const struct redist_table_direct *table) +{ + struct redist_table_direct *ntable; + ntable = redist_lookup_table_direct(red, table); + if (ntable == NULL) + return; + + listnode_delete(red->instances, ntable); + red->instances->del(ntable); + if (red->instances->count == 0) { + red->enabled = 0; + list_delete(&red->instances); + } +} + +void redist_del_all_instances(struct redist_proto *red) +{ if (!red->instances) return; - for (ALL_LIST_ELEMENTS(red->instances, ln, nn, id)) - redist_del_instance(red, *id); + list_delete(&red->instances); } /* Stop zebra client services. 
*/ @@ -480,6 +565,17 @@ enum zclient_send_status zclient_send_localsid(struct zclient *zclient, return zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api); } +static void zclient_send_table_direct(struct zclient *zclient, afi_t afi, int type) +{ + struct redist_table_direct *table; + struct redist_proto *red = &zclient->mi_redist[afi][ZEBRA_ROUTE_TABLE_DIRECT]; + struct listnode *node; + + for (ALL_LIST_ELEMENTS_RO(red->instances, node, table)) + zebra_redistribute_send(type, zclient, afi, ZEBRA_ROUTE_TABLE_DIRECT, + table->table_id, table->vrf_id); +} + /* Send register requests to zebra daemon for the information in a VRF. */ void zclient_send_reg_requests(struct zclient *zclient, vrf_id_t vrf_id) { @@ -513,6 +609,12 @@ void zclient_send_reg_requests(struct zclient *zclient, vrf_id_t vrf_id) if (!zclient->mi_redist[afi][i].enabled) continue; + if (i == ZEBRA_ROUTE_TABLE_DIRECT) { + zclient_send_table_direct(zclient, afi, + ZEBRA_REDISTRIBUTE_ADD); + continue; + } + struct listnode *node; unsigned short *id; @@ -580,6 +682,12 @@ void zclient_send_dereg_requests(struct zclient *zclient, vrf_id_t vrf_id) if (!zclient->mi_redist[afi][i].enabled) continue; + if (i == ZEBRA_ROUTE_TABLE_DIRECT) { + zclient_send_table_direct(zclient, afi, + ZEBRA_REDISTRIBUTE_DELETE); + continue; + } + struct listnode *node; unsigned short *id; @@ -2016,6 +2124,15 @@ bool zapi_route_notify_decode(struct stream *s, struct prefix *p, enum zapi_route_notify_owner *note, afi_t *afi, safi_t *safi) { + struct prefix dummy; + + return zapi_route_notify_decode_srcdest(s, p, &dummy, tableid, note, afi, safi); +} + +bool zapi_route_notify_decode_srcdest(struct stream *s, struct prefix *p, struct prefix *src_p, + uint32_t *tableid, enum zapi_route_notify_owner *note, + afi_t *afi, safi_t *safi) +{ uint32_t t; afi_t afi_val; safi_t safi_val; @@ -2025,6 +2142,9 @@ bool zapi_route_notify_decode(struct stream *s, struct prefix *p, STREAM_GETC(s, p->family); STREAM_GETC(s, p->prefixlen); STREAM_GET(&p->u.prefix, s, prefix_blen(p)); + src_p->family = p->family; + STREAM_GETC(s, src_p->prefixlen); + STREAM_GET(&src_p->u.prefix, s, prefix_blen(src_p)); STREAM_GETL(s, t); STREAM_GETC(s, afi_val); STREAM_GETC(s, safi_val); @@ -4634,9 +4754,52 @@ static void zclient_read(struct event *thread) zclient_event(ZCLIENT_READ, zclient); } +static void zclient_redistribute_table_direct(struct zclient *zclient, vrf_id_t vrf_id, afi_t afi, + int instance, int command) +{ + struct redist_proto *red = &zclient->mi_redist[afi][ZEBRA_ROUTE_TABLE_DIRECT]; + bool has_table; + struct redist_table_direct table = { + .vrf_id = vrf_id, + .table_id = instance, + }; + + has_table = redist_lookup_table_direct(red, &table); + + if (command == ZEBRA_REDISTRIBUTE_ADD) { + if (has_table) + return; + + redist_add_table_direct(red, &table); + } else { + if (!has_table) + return; + + redist_del_table_direct(red, &table); + } + + if (zclient->sock > 0) + zebra_redistribute_send(command, zclient, afi, ZEBRA_ROUTE_TABLE_DIRECT, instance, + vrf_id); +} + void zclient_redistribute(int command, struct zclient *zclient, afi_t afi, int type, unsigned short instance, vrf_id_t vrf_id) { + /* + * When asking for table-direct redistribution the parameter + * `instance` has a different meaning: it means table + * identification. + * + * The table identification information is stored in + * `zclient->mi_redist` along with the VRF identification + * information in a pair (different from the usual single protocol + * instance value). 
+ */ + if (type == ZEBRA_ROUTE_TABLE_DIRECT) { + zclient_redistribute_table_direct(zclient, vrf_id, afi, instance, command); + return; + } if (instance) { if (command == ZEBRA_REDISTRIBUTE_ADD) { diff --git a/lib/zclient.h b/lib/zclient.h index 2385a8a219..afd84acce2 100644 --- a/lib/zclient.h +++ b/lib/zclient.h @@ -268,6 +268,21 @@ struct redist_proto { struct list *instances; }; +/** + * Redistribute table direct instance data structure: keeps the VRF + * that subscribed to the table ID. + * + * **NOTE** + * `table_id` is an integer because that is what the netlink interface + * uses for route attribute RTA_TABLE (32bit int), however the whole + * zclient API uses `unsigned short` (and CLI commands) so it will be + * limited to the range 1 to 65535. + */ +struct redist_table_direct { + vrf_id_t vrf_id; + int table_id; +}; + struct zclient_capabilities { uint32_t ecmp; bool mpls_enabled; @@ -924,6 +939,15 @@ extern void redist_add_instance(struct redist_proto *, unsigned short); extern void redist_del_instance(struct redist_proto *, unsigned short); extern void redist_del_all_instances(struct redist_proto *red); +extern struct redist_table_direct * +redist_lookup_table_direct(const struct redist_proto *red, const struct redist_table_direct *table); +extern bool redist_table_direct_has_id(const struct redist_proto *red, int table_id); +extern void redist_add_table_direct(struct redist_proto *red, + const struct redist_table_direct *table); +extern void redist_del_table_direct(struct redist_proto *red, + const struct redist_table_direct *table); + + /* * Send to zebra that the specified vrf is using label to resolve * itself for L3VPN's. Repeated calls of this function with @@ -1144,6 +1168,9 @@ bool zapi_route_notify_decode(struct stream *s, struct prefix *p, uint32_t *tableid, enum zapi_route_notify_owner *note, afi_t *afi, safi_t *safi); +bool zapi_route_notify_decode_srcdest(struct stream *s, struct prefix *p, struct prefix *src_p, + uint32_t *tableid, enum zapi_route_notify_owner *note, + afi_t *afi, safi_t *safi); bool zapi_rule_notify_decode(struct stream *s, uint32_t *seqno, uint32_t *priority, uint32_t *unique, char *ifname, enum zapi_rule_notify_owner *note); diff --git a/m4/ax_lua.m4 b/m4/ax_lua.m4 index f4236cf08a..60bdf3da37 100644 --- a/m4/ax_lua.m4 +++ b/m4/ax_lua.m4 @@ -1,5 +1,5 @@ # =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_lua.html +# https://www.gnu.org/software/autoconf-archive/ax_lua.html # =========================================================================== # # SYNOPSIS @@ -19,7 +19,7 @@ # header is checked to match the Lua interpreter version exactly. When # searching for Lua libraries, the version number is used as a suffix. # This is done with the goal of supporting multiple Lua installs (5.1, -# 5.2, and 5.3 side-by-side). +# 5.2, 5.3, and 5.4 side-by-side). # # A note on compatibility with previous versions: This file has been # mostly rewritten for serial 18. Most developers should be able to use @@ -49,6 +49,14 @@ # interpreter. If LUA is blank, the user's path is searched for an # suitable interpreter. # +# Optionally a LUAJIT option may be set ahead of time to look for and +# validate a LuaJIT install instead of PUC Lua. Usage might look like: +# +# AC_ARG_WITH(luajit, [AS_HELP_STRING([--with-luajit], +# [Prefer LuaJIT over PUC Lua, even if the latter is newer. 
Default: no]) +# ]) +# AM_CONDITIONAL([LUAJIT], [test "x$with_luajit" != 'xno']) +# # If MINIMUM-VERSION is supplied, then only Lua interpreters with a # version number greater or equal to MINIMUM-VERSION will be accepted. If # TOO-BIG-VERSION is also supplied, then only Lua interpreters with a @@ -82,7 +90,7 @@ # appropriate Automake primary, e.g. lua_SCRIPS or luaexec_LIBRARIES. # # If an acceptable Lua interpreter is found, then ACTION-IF-FOUND is -# performed, otherwise ACTION-IF-NOT-FOUND is preformed. If ACTION-IF-NOT- +# performed, otherwise ACTION-IF-NOT-FOUND is performed. If ACTION-IF-NOT- # FOUND is blank, then it will default to printing an error. To prevent # the default behavior, give ':' as an action. # @@ -152,6 +160,7 @@ # # LICENSE # +# Copyright (c) 2023 Caleb Maclennan <caleb@alerque.com> # Copyright (c) 2015 Reuben Thomas <rrt@sc3d.org> # Copyright (c) 2014 Tim Perkins <tprk77@gmail.com> # @@ -166,7 +175,7 @@ # Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program. If not, see <http://www.gnu.org/licenses/>. +# with this program. If not, see <https://www.gnu.org/licenses/>. # # As a special exception, the respective Autoconf Macro's copyright owner # gives unlimited permission to copy, distribute and modify the configure @@ -181,7 +190,7 @@ # modified version of the Autoconf Macro, you may extend this special # exception to the GPL to apply to your modified version as well. -#serial 39 +#serial 47 dnl ========================================================================= dnl AX_PROG_LUA([MINIMUM-VERSION], [TOO-BIG-VERSION], @@ -197,13 +206,13 @@ AC_DEFUN([AX_PROG_LUA], AC_ARG_VAR([LUA], [The Lua interpreter, e.g. /usr/bin/lua5.1]) dnl Find a Lua interpreter. - m4_define_default([_AX_LUA_INTERPRETER_LIST], - [lua lua5.3 lua53 lua5.2 lua52 lua5.1 lua51 lua50]) + m4_define_default([_ax_lua_interpreter_list], + [lua lua5.4 lua54 lua5.3 lua53 lua5.2 lua52 lua5.1 lua51 lua50]) m4_if([$1], [], [ dnl No version check is needed. Find any Lua interpreter. AS_IF([test "x$LUA" = 'x'], - [AC_PATH_PROGS([LUA], [_AX_LUA_INTERPRETER_LIST], [:])]) + [AC_PATH_PROGS([LUA], [_ax_lua_interpreter_list], [:])]) ax_display_LUA='lua' AS_IF([test "x$LUA" != 'x:'], @@ -242,7 +251,7 @@ AC_DEFUN([AX_PROG_LUA], [_ax_check_text="for a Lua interpreter with version >= $1, < $2"]) AC_CACHE_CHECK([$_ax_check_text], [ax_cv_pathless_LUA], - [ for ax_cv_pathless_LUA in _AX_LUA_INTERPRETER_LIST none; do + [ for ax_cv_pathless_LUA in _ax_lua_interpreter_list none; do test "x$ax_cv_pathless_LUA" = 'xnone' && break _AX_LUA_CHK_IS_INTRP([$ax_cv_pathless_LUA], [], [continue]) _AX_LUA_CHK_VER([$ax_cv_pathless_LUA], [$1], [$2], [break]) @@ -268,7 +277,7 @@ AC_DEFUN([AX_PROG_LUA], ax_cv_lua_version=[`$LUA -e ' -- return a version number in X.Y format local _, _, ver = string.find(_VERSION, "^Lua (%d+%.%d+)") - print(ver)'`] + print(ver or "")'`] ]) AS_IF([test "x$ax_cv_lua_version" = 'x'], [AC_MSG_ERROR([invalid Lua version number])]) @@ -469,7 +478,7 @@ AC_DEFUN([AX_LUA_HEADERS], dnl Some default directories to search. LUA_SHORT_VERSION=`echo "$LUA_VERSION" | $SED 's|\.||'` - m4_define_default([_AX_LUA_INCLUDE_LIST], + m4_define_default([_ax_lua_include_list], [ /usr/include/lua$LUA_VERSION \ /usr/include/lua-$LUA_VERSION \ /usr/include/lua/$LUA_VERSION \ @@ -488,9 +497,11 @@ AC_DEFUN([AX_LUA_HEADERS], dnl Try some other directories if LUA_INCLUDE was not set. 
AS_IF([test "x$LUA_INCLUDE" = 'x' && - test "x$ac_cv_header_lua_h" != 'xyes'], + test "x$ac_cv_header_lua_h" != 'xyes' || + test "x$with_luajit" != 'xno' && + test "x$ac_cv_header_luajit_h" != 'xyes'], [ dnl Try some common include paths. - for _ax_include_path in _AX_LUA_INCLUDE_LIST; do + for _ax_include_path in _ax_lua_include_list; do test ! -d "$_ax_include_path" && continue AC_MSG_CHECKING([for Lua headers in]) @@ -500,6 +511,7 @@ AC_DEFUN([AX_LUA_HEADERS], AS_UNSET([ac_cv_header_lualib_h]) AS_UNSET([ac_cv_header_lauxlib_h]) AS_UNSET([ac_cv_header_luaconf_h]) + AS_UNSET([ac_cv_header_luajit_h]) _ax_lua_saved_cppflags=$CPPFLAGS CPPFLAGS="$CPPFLAGS -I$_ax_include_path" @@ -514,24 +526,42 @@ AC_DEFUN([AX_LUA_HEADERS], ]) AS_IF([test "x$ac_cv_header_lua_h" = 'xyes'], - [ AC_CACHE_CHECK([for Lua header version], - [ax_cv_lua_header_version], - [ - ax_cv_lua_header_version=`echo LUA_VERSION | \ - $CC -P -E $LUA_INCLUDE -imacros lua.h - | \ - $SED -e 's%"@<:@@<:@:space:@:>@@:>@*"%%g' -e 's%^@<:@@<:@:space:@:>@@:>@*%%' | \ - tr -d '"\n' | \ - $SED -n "s|^Lua \(@<:@0-9@:>@\{1,\}\.@<:@0-9@:>@\{1,\}\).\{0,\}|\1|p"` - ]) - - dnl Compare this to the previously found LUA_VERSION. - AC_MSG_CHECKING([if Lua header version matches $LUA_VERSION]) - AS_IF([test "x$ax_cv_lua_header_version" = "x$LUA_VERSION"], - [ AC_MSG_RESULT([yes]) - ax_header_version_match='yes' + [ dnl Make a program to print LUA_VERSION defined in the header. + dnl TODO It would be really nice if we could do this without compiling a + dnl program, then it would work when cross compiling. But I'm not sure how + dnl to do this reliably. For now, assume versions match when cross compiling. + + AS_IF([test "x$cross_compiling" != 'xyes'], + [ AC_CACHE_CHECK([for Lua header version], + [ax_cv_lua_header_version], + [ _ax_lua_saved_cppflags=$CPPFLAGS + CPPFLAGS="$CPPFLAGS $LUA_INCLUDE" + AC_COMPUTE_INT(ax_cv_lua_header_version_major,[LUA_VERSION_NUM/100],[AC_INCLUDES_DEFAULT +#include <lua.h> +],[ax_cv_lua_header_version_major=unknown]) + AC_COMPUTE_INT(ax_cv_lua_header_version_minor,[LUA_VERSION_NUM%100],[AC_INCLUDES_DEFAULT +#include <lua.h> +],[ax_cv_lua_header_version_minor=unknown]) + AS_IF([test "x$ax_cv_lua_header_version_major" = xunknown || test "x$ax_cv_lua_header_version_minor" = xunknown],[ + ax_cv_lua_header_version=unknown + ],[ + ax_cv_lua_header_version="$ax_cv_lua_header_version_major.$ax_cv_lua_header_version_minor" + ]) + CPPFLAGS=$_ax_lua_saved_cppflags + ]) + + dnl Compare this to the previously found LUA_VERSION. 
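Aside, not part of the patch: the AC_COMPUTE_INT probes above recover the header version from LUA_VERSION_NUM, which PUC Lua defines as MAJOR*100 + MINOR. A minimal C illustration of that arithmetic, assuming the Lua 5.4 value of 504:

#include <stdio.h>

int main(void)
{
	int lua_version_num = 504;         /* assumed value of LUA_VERSION_NUM in lua.h for Lua 5.4 */
	int major = lua_version_num / 100; /* 5 -> ax_cv_lua_header_version_major */
	int minor = lua_version_num % 100; /* 4 -> ax_cv_lua_header_version_minor */

	printf("%d.%d\n", major, minor);   /* prints "5.4", which is compared against $LUA_VERSION */
	return 0;
}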
+ AC_MSG_CHECKING([if Lua header version matches $LUA_VERSION]) + AS_IF([test "x$ax_cv_lua_header_version" = "x$LUA_VERSION"], + [ AC_MSG_RESULT([yes]) + ax_header_version_match='yes' + ], + [ AC_MSG_RESULT([no]) + ax_header_version_match='no' + ]) ], - [ AC_MSG_RESULT([no]) - ax_header_version_match='no' + [ AC_MSG_WARN([cross compiling so assuming header version number matches]) + ax_header_version_match='yes' ]) ]) @@ -612,7 +642,7 @@ AC_DEFUN([AX_LUA_LIBS], ], [_ax_found_lua_libs='yes'], [_ax_found_lua_libs='no'], - [$_ax_lua_extra_libs]) + [$_ax_lua_extra_libs])]) LIBS=$_ax_lua_saved_libs AS_IF([test "x$ac_cv_search_lua_load" != 'xno' && diff --git a/mgmtd/mgmt_be_adapter.c b/mgmtd/mgmt_be_adapter.c index 45e154d83b..8f70490379 100644 --- a/mgmtd/mgmt_be_adapter.c +++ b/mgmtd/mgmt_be_adapter.c @@ -77,12 +77,20 @@ static const char *const zebra_config_xpaths[] = { }; static const char *const zebra_oper_xpaths[] = { + "/frr-backend:clients", "/frr-interface:lib/interface", "/frr-vrf:lib/vrf/frr-zebra:zebra", "/frr-zebra:zebra", NULL, }; +#ifdef HAVE_MGMTD_TESTC +static const char *const mgmtd_testc_oper_xpaths[] = { + "/frr-backend:clients", + NULL, +}; +#endif + #ifdef HAVE_RIPD static const char *const ripd_config_xpaths[] = { "/frr-filter:lib", @@ -94,6 +102,7 @@ static const char *const ripd_config_xpaths[] = { NULL, }; static const char *const ripd_oper_xpaths[] = { + "/frr-backend:clients", "/frr-ripd:ripd", "/ietf-key-chain:key-chains", NULL, @@ -114,6 +123,7 @@ static const char *const ripngd_config_xpaths[] = { NULL, }; static const char *const ripngd_oper_xpaths[] = { + "/frr-backend:clients", "/frr-ripngd:ripngd", NULL, }; @@ -130,6 +140,11 @@ static const char *const staticd_config_xpaths[] = { "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd", NULL, }; + +static const char *const staticd_oper_xpaths[] = { + "/frr-backend:clients", + NULL, +}; #endif static const char *const *be_client_config_xpaths[MGMTD_BE_CLIENT_ID_MAX] = { @@ -146,12 +161,18 @@ static const char *const *be_client_config_xpaths[MGMTD_BE_CLIENT_ID_MAX] = { }; static const char *const *be_client_oper_xpaths[MGMTD_BE_CLIENT_ID_MAX] = { +#ifdef HAVE_MGMTD_TESTC + [MGMTD_BE_CLIENT_ID_TESTC] = mgmtd_testc_oper_xpaths, +#endif #ifdef HAVE_RIPD [MGMTD_BE_CLIENT_ID_RIPD] = ripd_oper_xpaths, #endif #ifdef HAVE_RIPNGD [MGMTD_BE_CLIENT_ID_RIPNGD] = ripngd_oper_xpaths, #endif +#ifdef HAVE_STATICD + [MGMTD_BE_CLIENT_ID_STATICD] = staticd_oper_xpaths, +#endif [MGMTD_BE_CLIENT_ID_ZEBRA] = zebra_oper_xpaths, }; @@ -320,7 +341,7 @@ static void mgmt_be_xpath_map_init(void) __dbg("Total Cfg XPath Maps: %u", darr_len(be_cfg_xpath_map)); __dbg("Total Oper XPath Maps: %u", darr_len(be_oper_xpath_map)); - __dbg("Total Noitf XPath Maps: %u", darr_len(be_notif_xpath_map)); + __dbg("Total Notif XPath Maps: %u", darr_len(be_notif_xpath_map)); __dbg("Total RPC XPath Maps: %u", darr_len(be_rpc_xpath_map)); } @@ -651,13 +672,17 @@ int mgmt_be_send_native(enum mgmt_be_client_id id, void *msg) return mgmt_msg_native_send_msg(adapter->conn, msg, false); } +/* + * Send notification to back-ends that subscribed for them. 
+ */ static void mgmt_be_adapter_send_notify(struct mgmt_msg_notify_data *msg, size_t msglen) { struct mgmt_be_client_adapter *adapter; struct mgmt_be_xpath_map *map; - struct nb_node *nb_node; + struct nb_node *nb_node = NULL; const char *notif; + bool is_root; uint id, len; if (!darr_len(be_notif_xpath_map)) @@ -669,28 +694,34 @@ static void mgmt_be_adapter_send_notify(struct mgmt_msg_notify_data *msg, return; } - nb_node = nb_node_find(notif); - if (!nb_node) { - __log_err("No schema found for notification: %s", notif); - return; + is_root = !strcmp(notif, "/"); + if (!is_root) { + nb_node = nb_node_find(notif); + if (!nb_node) { + __log_err("No schema found for notification: %s", notif); + return; + } } darr_foreach_p (be_notif_xpath_map, map) { - len = strlen(map->xpath_prefix); - if (strncmp(map->xpath_prefix, nb_node->xpath, len) && - strncmp(map->xpath_prefix, notif, len)) - continue; - + if (!is_root) { + len = strlen(map->xpath_prefix); + if (strncmp(map->xpath_prefix, nb_node->xpath, len) && + strncmp(map->xpath_prefix, notif, len)) + continue; + } FOREACH_BE_CLIENT_BITS (id, map->clients) { adapter = mgmt_be_get_adapter_by_id(id); if (!adapter) continue; + msg_conn_send_msg(adapter->conn, MGMT_MSG_VERSION_NATIVE, msg, msglen, NULL, false); } } } + /* * Handle a native encoded message */ @@ -735,6 +766,9 @@ static void be_adapter_handle_native_msg(struct mgmt_be_client_adapter *adapter, mgmt_txn_notify_rpc_reply(adapter, rpc_msg, msg_len); break; case MGMT_MSG_CODE_NOTIFY: + /* + * Handle notify message from a back-end client + */ notify_msg = (typeof(notify_msg))msg; __dbg("Got NOTIFY from '%s'", adapter->name); mgmt_be_adapter_send_notify(notify_msg, msg_len); diff --git a/mgmtd/mgmt_fe_adapter.c b/mgmtd/mgmt_fe_adapter.c index 7f7a5d9a8e..8d59198803 100644 --- a/mgmtd/mgmt_fe_adapter.c +++ b/mgmtd/mgmt_fe_adapter.c @@ -9,6 +9,7 @@ #include <zebra.h> #include "darr.h" +#include "frrstr.h" #include "sockopt.h" #include "network.h" #include "libfrr.h" @@ -31,6 +32,7 @@ #define FOREACH_ADAPTER_IN_LIST(adapter) \ frr_each_safe (mgmt_fe_adapters, &mgmt_fe_adapters, (adapter)) + enum mgmt_session_event { MGMTD_FE_SESSION_CFG_TXN_CLNUP = 1, MGMTD_FE_SESSION_SHOW_TXN_CLNUP, @@ -55,6 +57,22 @@ DECLARE_LIST(mgmt_fe_sessions, struct mgmt_fe_session_ctx, list_linkage); #define FOREACH_SESSION_IN_LIST(adapter, session) \ frr_each_safe (mgmt_fe_sessions, &(adapter)->fe_sessions, (session)) +/* + * A tree for storing unique notify-select strings. 
+ */ +PREDECL_RBTREE_UNIQ(ns_string); +struct ns_string { + struct ns_string_item link; + struct list *sessions; + char s[]; +}; +static uint32_t ns_string_compare(const struct ns_string *ns1, const struct ns_string *ns2); +DECLARE_RBTREE_UNIQ(ns_string, struct ns_string, link, ns_string_compare); + +/* ---------------- */ +/* Global variables */ +/* ---------------- */ + static struct event_loop *mgmt_loop; static struct msg_server mgmt_fe_server = {.fd = -1}; @@ -63,6 +81,89 @@ static struct mgmt_fe_adapters_head mgmt_fe_adapters; static struct hash *mgmt_fe_sessions; static uint64_t mgmt_fe_next_session_id; +static struct ns_string_head mgmt_fe_ns_strings; + +/* ------------------------------ */ +/* Notify select string functions */ +/* ------------------------------ */ + +static uint32_t ns_string_compare(const struct ns_string *ns1, const struct ns_string *ns2) +{ + return strcmp(ns1->s, ns2->s); +} + +static void mgmt_fe_free_ns_string(struct ns_string *ns) +{ + list_delete(&ns->sessions); + XFREE(MTYPE_MGMTD_XPATH, ns); +} + +static void mgmt_fe_free_ns_strings(struct ns_string_head *head) +{ + struct ns_string *ns; + + while ((ns = ns_string_pop(head))) + mgmt_fe_free_ns_string(ns); + ns_string_fini(head); +} + +static uint64_t mgmt_fe_ns_string_remove_session(struct ns_string_head *head, + struct mgmt_fe_session_ctx *session) +{ + struct listnode *node; + struct ns_string *ns; + uint64_t clients = 0; + + frr_each_safe (ns_string, head, ns) { + node = listnode_lookup(ns->sessions, session); + if (!node) + continue; + list_delete_node(ns->sessions, node); + clients |= mgmt_be_interested_clients(ns->s, MGMT_BE_XPATH_SUBSCR_TYPE_OPER); + if (list_isempty(ns->sessions)) { + ns_string_del(head, ns); + mgmt_fe_free_ns_string(ns); + } + } + + return clients; +} + +static uint64_t mgmt_fe_add_ns_string(struct ns_string_head *head, const char *path, size_t plen, + struct mgmt_fe_session_ctx *session) +{ + struct ns_string *e, *ns; + uint64_t clients = 0; + + ns = XCALLOC(MTYPE_MGMTD_XPATH, sizeof(*ns) + plen + 1); + strlcpy(ns->s, path, plen + 1); + + e = ns_string_add(head, ns); + if (!e) { + ns->sessions = list_new(); + listnode_add(ns->sessions, session); + clients = mgmt_be_interested_clients(ns->s, MGMT_BE_XPATH_SUBSCR_TYPE_OPER); + } else { + XFREE(MTYPE_MGMTD_XPATH, ns); + if (!listnode_lookup(e->sessions, session)) + listnode_add(e->sessions, session); + } + + return clients; +} + +char **mgmt_fe_get_all_selectors(void) +{ + char **selectors = NULL; + struct ns_string *ns; + + frr_each (ns_string, &mgmt_fe_ns_strings, ns) + *darr_append(selectors) = darr_strdup(ns->s); + + return selectors; +} + + /* Forward declarations */ static void mgmt_fe_session_register_event(struct mgmt_fe_session_ctx *session, @@ -190,6 +291,7 @@ static void mgmt_fe_cleanup_session(struct mgmt_fe_session_ctx **sessionp) assert(session->adapter->refcount > 1); mgmt_fe_adapter_unlock(&session->adapter); } + mgmt_fe_ns_string_remove_session(&mgmt_fe_ns_strings, session); darr_free_free(session->notify_xpaths); hash_release(mgmt_fe_sessions, session); XFREE(MTYPE_MGMTD_FE_SESSION, session); @@ -1237,8 +1339,8 @@ static void fe_adapter_handle_session_req(struct mgmt_fe_client_adapter *adapter struct mgmt_fe_session_ctx *session; uint64_t client_id; - __dbg("Got session-req creating: %u for refer-id %" PRIu64 " from '%s'", - msg->refer_id == 0, msg->refer_id, adapter->name); + __dbg("Got session-req is create %u req-id %Lu for refer-id %Lu from '%s'", + msg->refer_id == 0, msg->req_id, msg->refer_id, 
adapter->name); if (msg->refer_id) { uint64_t session_id = msg->refer_id; @@ -1542,32 +1644,83 @@ static void fe_adapter_handle_edit(struct mgmt_fe_session_ctx *session, * @__msg: the message data. * @msg_len: the length of the message data. */ -static void fe_adapter_handle_notify_select(struct mgmt_fe_session_ctx *session, - void *__msg, size_t msg_len) +static void fe_adapter_handle_notify_select(struct mgmt_fe_session_ctx *session, void *__msg, + size_t msg_len) { struct mgmt_msg_notify_select *msg = __msg; uint64_t req_id = msg->req_id; const char **selectors = NULL; const char **new; + const char **sp; + char *selstr = NULL; + uint64_t clients = 0; + uint ret; if (msg_len >= sizeof(*msg)) { - selectors = mgmt_msg_native_strings_decode(msg, msg_len, - msg->selectors); + selectors = mgmt_msg_native_strings_decode(msg, msg_len, msg->selectors); if (!selectors) { - fe_adapter_send_error(session, req_id, false, -EINVAL, - "Invalid message"); + fe_adapter_send_error(session, req_id, false, -EINVAL, "Invalid message"); return; } } + if (DEBUG_MODE_CHECK(&mgmt_debug_fe, DEBUG_MODE_ALL)) { + selstr = frrstr_join(selectors, darr_len(selectors), ", "); + if (!selstr) + selstr = XSTRDUP(MTYPE_TMP, ""); + } + if (msg->replace) { + clients = mgmt_fe_ns_string_remove_session(&mgmt_fe_ns_strings, session); + // [ ] Keep a local tree to optimize sending selectors to BE? + // [*] Or just KISS and fanout the original message to BEs? + // mgmt_remove_add_notify_selectors(session->notify_xpaths, selectors); darr_free_free(session->notify_xpaths); session->notify_xpaths = selectors; } else if (selectors) { - new = darr_append_nz(session->notify_xpaths, - darr_len(selectors)); + // [ ] Keep a local tree to optimize sending selectors to BE? + // [*] Or just KISS and fanout the original message to BEs? + // mgmt_remove_add_notify_selectors(session->notify_xpaths, selectors); + new = darr_append_nz(session->notify_xpaths, darr_len(selectors)); memcpy(new, selectors, darr_len(selectors) * sizeof(*selectors)); - darr_free(selectors); + } else { + __log_err("Invalid msg from session-id: %Lu: no selectors present in non-replace msg", + session->session_id); + darr_free_free(selectors); + selectors = NULL; + goto done; } + + + if (session->notify_xpaths && DEBUG_MODE_CHECK(&mgmt_debug_fe, DEBUG_MODE_ALL)) { + const char **sel = session->notify_xpaths; + char *s = frrstr_join(sel, darr_len(sel), ", "); + __dbg("New NOTIF %d selectors '%s' (replace: %d) txn-id: %Lu for session-id: %Lu", + darr_len(sel), s, msg->replace, session->cfg_txn_id, session->session_id); + XFREE(MTYPE_TMP, s); + } + + /* Add the new selectors to the global tree */ + darr_foreach_p (selectors, sp) + clients |= mgmt_fe_add_ns_string(&mgmt_fe_ns_strings, *sp, darr_strlen(*sp), + session); + + if (!clients) { + __dbg("No backends to newly notify for selectors: '%s' txn-id %Lu session-id: %Lu", + selstr, session->txn_id, session->session_id); + goto done; + } + + /* We don't use a transaction for this, just send the message */ + ret = mgmt_txn_send_notify_selectors(req_id, clients, msg->replace ? 
NULL : selectors); + if (ret) { + fe_adapter_send_error(session, req_id, false, -EINPROGRESS, + "Failed to create a NOTIFY_SELECT transaction"); + } +done: + if (session->notify_xpaths != selectors) + darr_free(selectors); + if (selstr) + XFREE(MTYPE_TMP, selstr); } /** @@ -1758,10 +1911,11 @@ void mgmt_fe_adapter_send_notify(struct mgmt_msg_notify_data *msg, size_t msglen { struct mgmt_fe_client_adapter *adapter; struct mgmt_fe_session_ctx *session; - struct nb_node *nb_node; - const char **xpath_prefix; + struct nb_node *nb_node = NULL; + struct listnode *node; + struct ns_string *ns; const char *notif; - bool sendit; + bool is_root; uint len; assert(msg->refer_id == 0); @@ -1772,36 +1926,48 @@ void mgmt_fe_adapter_send_notify(struct mgmt_msg_notify_data *msg, size_t msglen return; } - /* - * We need the nb_node to obtain a path which does not include any - * specific list entry selectors - */ - nb_node = nb_node_find(notif); - if (!nb_node) { - __log_err("No schema found for notification: %s", notif); - return; + is_root = !strcmp(notif, "/"); + if (!is_root) { + /* + * We need the nb_node to obtain a path which does not include any + * specific list entry selectors + */ + nb_node = nb_node_find(notif); + if (!nb_node) { + __log_err("No schema found for notification: %s", notif); + return; + } } - FOREACH_ADAPTER_IN_LIST (adapter) { - FOREACH_SESSION_IN_LIST (adapter, session) { - /* If no selectors then always send */ - sendit = !session->notify_xpaths; - darr_foreach_p (session->notify_xpaths, xpath_prefix) { - len = strlen(*xpath_prefix); - if (!strncmp(*xpath_prefix, notif, len) || - !strncmp(*xpath_prefix, nb_node->xpath, - len)) { - sendit = true; - break; - } - } - if (sendit) { + frr_each (ns_string, &mgmt_fe_ns_strings, ns) { + if (!is_root) { + len = strlen(ns->s); + if (strncmp(ns->s, notif, len) && strncmp(ns->s, nb_node->xpath, len)) + continue; + } + for (ALL_LIST_ELEMENTS_RO(ns->sessions, node, session)) { + msg->refer_id = session->session_id; + (void)fe_adapter_send_native_msg(session->adapter, msg, msglen, false); + } + } + + /* + * Send all YANG defined notifications to all sesisons with *no* + * selectors as well (i.e., original NETCONF/RESTCONF notification + * scheme). 
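Aside, not part of the patch: the selector walk above is a plain prefix comparison; a notification is delivered to a session when one of its registered notify-select strings is a leading prefix of either the concrete notification path or its schema path. A standalone sketch of that test, with made-up paths:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Same shape as the strncmp test in mgmt_fe_adapter_send_notify(): the
 * selector matches when it is a leading prefix of the notification path. */
static bool selector_matches(const char *selector, const char *notif_path)
{
	return strncmp(selector, notif_path, strlen(selector)) == 0;
}

int main(void)
{
	const char *notif = "/frr-interface:lib/interface[name='eth0']/state";

	printf("%d\n", selector_matches("/frr-interface:lib", notif)); /* 1: delivered */
	printf("%d\n", selector_matches("/frr-ripd:ripd", notif));     /* 0: skipped  */
	return 0;
}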
+ */ + if (!is_root && CHECK_FLAG(nb_node->snode->nodetype, LYS_NOTIF)) { + FOREACH_ADAPTER_IN_LIST (adapter) { + FOREACH_SESSION_IN_LIST (adapter, session) { + if (session->notify_xpaths) + continue; msg->refer_id = session->session_id; (void)fe_adapter_send_native_msg(adapter, msg, msglen, false); } } } + msg->refer_id = 0; } @@ -1810,9 +1976,10 @@ void mgmt_fe_adapter_lock(struct mgmt_fe_client_adapter *adapter) adapter->refcount++; } -extern void mgmt_fe_adapter_unlock(struct mgmt_fe_client_adapter **adapter) +void mgmt_fe_adapter_unlock(struct mgmt_fe_client_adapter **adapter) { struct mgmt_fe_client_adapter *a = *adapter; + assert(a && a->refcount); if (!--a->refcount) { @@ -1840,6 +2007,8 @@ void mgmt_fe_adapter_init(struct event_loop *tm) hash_create(mgmt_fe_session_hash_key, mgmt_fe_session_hash_cmp, "MGMT Frontend Sessions"); + ns_string_init(&mgmt_fe_ns_strings); + snprintf(server_path, sizeof(server_path), MGMTD_FE_SOCK_NAME); if (msg_server_init(&mgmt_fe_server, server_path, tm, @@ -1869,10 +2038,13 @@ void mgmt_fe_adapter_destroy(void) msg_server_cleanup(&mgmt_fe_server); + /* Deleting the adapters will delete all the sessions */ FOREACH_ADAPTER_IN_LIST (adapter) mgmt_fe_adapter_delete(adapter); + mgmt_fe_free_ns_strings(&mgmt_fe_ns_strings); + hash_clean_and_free(&mgmt_fe_sessions, mgmt_fe_abort_if_session); } @@ -1885,8 +2057,7 @@ struct msg_conn *mgmt_fe_create_adapter(int conn_fd, union sockunion *from) adapter = mgmt_fe_find_adapter_by_fd(conn_fd); if (!adapter) { - adapter = XCALLOC(MTYPE_MGMTD_FE_ADPATER, - sizeof(struct mgmt_fe_client_adapter)); + adapter = XCALLOC(MTYPE_MGMTD_FE_ADPATER, sizeof(struct mgmt_fe_client_adapter)); snprintf(adapter->name, sizeof(adapter->name), "Unknown-FD-%d", conn_fd); diff --git a/mgmtd/mgmt_fe_adapter.h b/mgmtd/mgmt_fe_adapter.h index 4d94e7604f..19a3d16342 100644 --- a/mgmtd/mgmt_fe_adapter.h +++ b/mgmtd/mgmt_fe_adapter.h @@ -225,6 +225,13 @@ extern int mgmt_fe_adapter_txn_error(uint64_t txn_id, uint64_t req_id, const char *errstr); +/** + * mgmt_fe_get_all_selectors() - Get all selectors for all frontend adapters. + * + * Returns: A darr array of all selectors for all frontend adapters. + */ +extern char **mgmt_fe_get_all_selectors(void); + /* Fetch frontend client session set-config stats */ extern struct mgmt_setcfg_stats * mgmt_fe_get_session_setcfg_stats(uint64_t session_id); diff --git a/mgmtd/mgmt_history.c b/mgmtd/mgmt_history.c index c97cb7f0fd..934748b1f2 100644 --- a/mgmtd/mgmt_history.c +++ b/mgmtd/mgmt_history.c @@ -177,6 +177,7 @@ static bool mgmt_history_dump_cmt_record_index(void) return false; } + assert(cnt <= 10); /* silence bad CLANG SA warning */ ret = fwrite(&cmt_info_set, sizeof(struct mgmt_cmt_info_t), cnt, fp); fclose(fp); if (ret != cnt) { diff --git a/mgmtd/mgmt_main.c b/mgmtd/mgmt_main.c index 1880d94415..e3fc6b7f29 100644 --- a/mgmtd/mgmt_main.c +++ b/mgmtd/mgmt_main.c @@ -159,6 +159,12 @@ const struct frr_yang_module_info ietf_netconf_with_defaults_info = { * clients into mgmtd. The modules are used by libyang in order to support * parsing binary data returns from the backend. */ +const struct frr_yang_module_info frr_backend_client_info = { + .name = "frr-backend", + .ignore_cfg_cbs = true, + .nodes = { { .xpath = NULL } }, +}; + const struct frr_yang_module_info zebra_route_map_info = { .name = "frr-zebra-route-map", .ignore_cfg_cbs = true, @@ -183,6 +189,7 @@ static const struct frr_yang_module_info *const mgmt_yang_modules[] = { /* * YANG module info used by backend clients get added here. 
*/ + &frr_backend_client_info, &frr_zebra_cli_info, &zebra_route_map_info, @@ -231,10 +238,9 @@ int main(int argc, char **argv) int buffer_size = MGMTD_SOCKET_BUF_SIZE; frr_preinit(&mgmtd_di, argc, argv); - frr_opt_add( - "s:n" DEPRECATED_OPTIONS, longopts, - " -s, --socket_size Set MGMTD peer socket send buffer size\n" - " -n, --vrfwnetns Use NetNS as VRF backend\n"); + frr_opt_add("s:n" DEPRECATED_OPTIONS, longopts, + " -s, --socket_size Set MGMTD peer socket send buffer size\n" + " -n, --vrfwnetns Use NetNS as VRF backend (deprecated, use -w)\n"); /* Command line argument treatment. */ while (1) { @@ -257,6 +263,8 @@ int main(int argc, char **argv) buffer_size = atoi(optarg); break; case 'n': + fprintf(stderr, + "The -n option is deprecated, please use global -w option instead.\n"); vrf_configure_backend(VRF_BACKEND_NETNS); break; default: diff --git a/mgmtd/mgmt_testc.c b/mgmtd/mgmt_testc.c index 8bb07ed068..ab8ea9a04d 100644 --- a/mgmtd/mgmt_testc.c +++ b/mgmtd/mgmt_testc.c @@ -9,8 +9,10 @@ #include <zebra.h> #include <lib/version.h> #include "darr.h" +#include "debug.h" #include "libfrr.h" #include "mgmt_be_client.h" +#include "mgmt_msg_native.h" #include "northbound.h" /* ---------------- */ @@ -43,15 +45,15 @@ struct zebra_privs_t __privs = { .cap_num_i = 0, }; -#define OPTION_LISTEN 2000 -#define OPTION_NOTIF_COUNT 2001 -#define OPTION_TIMEOUT 2002 -const struct option longopts[] = { - { "listen", no_argument, NULL, OPTION_LISTEN }, - { "notif-count", required_argument, NULL, OPTION_NOTIF_COUNT }, - { "timeout", required_argument, NULL, OPTION_TIMEOUT }, - { 0 } -}; +#define OPTION_DATASTORE 2000 +#define OPTION_LISTEN 2001 +#define OPTION_NOTIF_COUNT 2002 +#define OPTION_TIMEOUT 2003 +const struct option longopts[] = { { "datastore", no_argument, NULL, OPTION_DATASTORE }, + { "listen", no_argument, NULL, OPTION_LISTEN }, + { "notify-count", required_argument, NULL, OPTION_NOTIF_COUNT }, + { "timeout", required_argument, NULL, OPTION_TIMEOUT }, + { 0 } }; /* Master of threads. 
*/ @@ -79,6 +81,20 @@ struct frr_signal_t __signals[] = { #define MGMTD_TESTC_VTY_PORT 2624 /* clang-format off */ +static const struct frr_yang_module_info frr_if_info = { + .name = "frr-interface", + .ignore_cfg_cbs = true, + .nodes = { + { + .xpath = "/frr-interface:lib/interface", + .cbs.notify = async_notification, + }, + { + .xpath = NULL, + } + } +}; + static const struct frr_yang_module_info frr_ripd_info = { .name = "frr-ripd", .ignore_cfg_cbs = true, @@ -98,6 +114,8 @@ static const struct frr_yang_module_info frr_ripd_info = { }; static const struct frr_yang_module_info *const mgmt_yang_modules[] = { + &frr_backend_info, + &frr_if_info, &frr_ripd_info, }; @@ -123,6 +141,7 @@ const char **__rpc_xpaths; struct mgmt_be_client_cbs __client_cbs = {}; struct event *event_timeout; +int f_datastore; int o_notif_count = 1; int o_timeout; @@ -165,10 +184,56 @@ static void success(struct event *event) quit(0); } -static void async_notification(struct nb_cb_notify_args *args) +static void __ds_notification(struct nb_cb_notify_args *args) { - zlog_notice("Received YANG notification"); + uint8_t *output = NULL; + + zlog_notice("Received YANG datastore notification: op %u", args->op); + + if (args->op == NOTIFY_OP_NOTIFICATION) { + zlog_warn("ignoring non-datastore op notification: %s", args->xpath); + return; + } + + /* datastore notification */ + switch (args->op) { + case NOTIFY_OP_DS_REPLACE: + printfrr("#OP=REPLACE: %s\n", args->xpath); + break; + case NOTIFY_OP_DS_DELETE: + printfrr("#OP=DELETE: %s\n", args->xpath); + break; + case NOTIFY_OP_DS_PATCH: + printfrr("#OP=PATCH: %s\n", args->xpath); + break; + default: + printfrr("#OP=%u: unknown notify op\n", args->op); + quit(1); + } + if (args->dnode && args->op != NOTIFY_OP_DS_DELETE) { + output = yang_print_tree(args->dnode, LYD_JSON, LYD_PRINT_SHRINK); + if (output) { + printfrr("%s\n", output); + darr_free(output); + } + } + fflush(stdout); + + if (o_notif_count && !--o_notif_count) + quit(0); +} + +static void __notification(struct nb_cb_notify_args *args) +{ + zlog_notice("Received YANG notification: op: %u", args->op); + + if (args->op != NOTIFY_OP_NOTIFICATION) { + zlog_warn("ignoring datastore notification: op: %u: path %s", args->op, args->xpath); + return; + } + + /* bogus, we should print the actual data */ printf("{\"frr-ripd:authentication-failure\": {\"interface-name\": \"%s\"}}\n", yang_dnode_get_string(args->dnode, "interface-name")); @@ -176,6 +241,14 @@ static void async_notification(struct nb_cb_notify_args *args) quit(0); } +static void async_notification(struct nb_cb_notify_args *args) +{ + if (f_datastore) + __ds_notification(args); + else + __notification(args); +} + static int rpc_callback(struct nb_cb_rpc_args *args) { const char *vrf = NULL; @@ -210,6 +283,9 @@ int main(int argc, char **argv) break; switch (opt) { + case OPTION_DATASTORE: + f_datastore = 1; + break; case OPTION_LISTEN: f_listen = 1; break; @@ -228,6 +304,9 @@ int main(int argc, char **argv) master = frr_init(); + mgmt_be_client_lib_vty_init(); + mgmt_dbg_be_client.flags = DEBUG_MODE_ALL; + /* * Setup notification listen */ diff --git a/mgmtd/mgmt_txn.c b/mgmtd/mgmt_txn.c index ccfdd7539f..4afab389b8 100644 --- a/mgmtd/mgmt_txn.c +++ b/mgmtd/mgmt_txn.c @@ -237,6 +237,7 @@ struct mgmt_txn_ctx { struct event *clnup; /* List of backend adapters involved in this transaction */ + /* XXX reap this */ struct mgmt_txn_badapters_head be_adapters; int refcount; @@ -2651,6 +2652,52 @@ int mgmt_txn_send_rpc(uint64_t txn_id, uint64_t req_id, uint64_t clients, 
return 0; } +int mgmt_txn_send_notify_selectors(uint64_t req_id, uint64_t clients, const char **selectors) +{ + struct mgmt_msg_notify_select *msg; + char **all_selectors = NULL; + uint64_t id; + int ret; + uint i; + + msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_notify_select, 0, + MTYPE_MSG_NATIVE_NOTIFY_SELECT); + msg->refer_id = MGMTD_TXN_ID_NONE; + msg->req_id = req_id; + msg->code = MGMT_MSG_CODE_NOTIFY_SELECT; + msg->replace = selectors == NULL; + + if (selectors == NULL) { + /* Get selectors for all sessions */ + all_selectors = mgmt_fe_get_all_selectors(); + selectors = (const char **)all_selectors; + } + + darr_foreach_i (selectors, i) + mgmt_msg_native_add_str(msg, selectors[i]); + + assert(clients); + FOREACH_BE_CLIENT_BITS (id, clients) { + /* make sure the backend is running/connected */ + if (!mgmt_be_get_adapter_by_id(id)) + continue; + ret = mgmt_be_send_native(id, msg); + if (ret) { + __log_err("Could not send notify-select message to backend client %s", + mgmt_be_client_id2name(id)); + continue; + } + + __dbg("Sent notify-select req to backend client %s", mgmt_be_client_id2name(id)); + } + mgmt_msg_native_free_msg(msg); + + if (all_selectors) + darr_free_free(all_selectors); + + return 0; +} + /* * Error reply from the backend client. */ diff --git a/mgmtd/mgmt_txn.h b/mgmtd/mgmt_txn.h index 37dadc0171..879d205175 100644 --- a/mgmtd/mgmt_txn.h +++ b/mgmtd/mgmt_txn.h @@ -297,6 +297,16 @@ extern int mgmt_txn_send_rpc(uint64_t txn_id, uint64_t req_id, uint64_t clients, LYD_FORMAT result_type, const char *xpath, const char *data, size_t data_len); +/** + * mgmt_txn_send_notify_selectors() - Send NOTIFY SELECT request. + * @req_id: FE client request identifier. + * @clients: Bitmask of clients to send RPC to. + * @selectors: Array of selectors or NULL to resend all selectors to BE clients. + * + * Returns 0 on success. + */ +extern int mgmt_txn_send_notify_selectors(uint64_t req_id, uint64_t clients, const char **selectors); + /* * Notifiy backend adapter on connection. 
*/ diff --git a/ospf6d/ospf6_lsdb.c b/ospf6d/ospf6_lsdb.c index e5de30484a..3215d51a7a 100644 --- a/ospf6d/ospf6_lsdb.c +++ b/ospf6d/ospf6_lsdb.c @@ -258,7 +258,8 @@ struct ospf6_lsa *ospf6_lsdb_lookup_next(uint16_t type, uint32_t id, ospf6_lsdb_set_key(&key, &adv_router, sizeof(adv_router)); ospf6_lsdb_set_key(&key, &id, sizeof(id)); - zlog_debug("lsdb_lookup_next: key: %pFX", &key); + if (OSPF6_LSA_DEBUG) + zlog_debug("lsdb_lookup_next: key: %pFX", &key); node = route_table_get_next(lsdb->table, &key); @@ -398,7 +399,9 @@ int ospf6_lsdb_maxage_remover(struct ospf6_lsdb *lsdb) EVENT_OFF(lsa->refresh); event_execute(master, ospf6_lsa_refresh, lsa, 0, NULL); } else { - zlog_debug("calling ospf6_lsdb_remove %s", lsa->name); + if (IS_OSPF6_DEBUG_LSA_TYPE(lsa->header->type)) + zlog_debug("calling ospf6_lsdb_remove %s", lsa->name); + ospf6_lsdb_remove(lsa, lsdb); } } diff --git a/ospfclient/ospfclient.py b/ospfclient/ospfclient.py index 7477ef8191..588c1c9fdd 100755 --- a/ospfclient/ospfclient.py +++ b/ospfclient/ospfclient.py @@ -306,7 +306,7 @@ class OspfApiClient: self._s = None self._as = None self._ls = None - self._ar = self._r = self._w = None + self._ar = self._r = self._aw = self._w = None self.server = server self.handlers = handlers if handlers is not None else dict() self.write_lock = Lock() @@ -345,7 +345,7 @@ class OspfApiClient: logging.debug("%s: success", self) self._r, self._w = await asyncio.open_connection(sock=self._s) - self._ar, _ = await asyncio.open_connection(sock=self._as) + self._ar, self._aw = await asyncio.open_connection(sock=self._as) self._seq = 1 async def connect(self): diff --git a/ospfd/ospf_asbr.c b/ospfd/ospf_asbr.c index aa11467027..978a6fcc13 100644 --- a/ospfd/ospf_asbr.c +++ b/ospfd/ospf_asbr.c @@ -1145,8 +1145,7 @@ static void ospf_external_aggr_timer(struct ospf *ospf, aggr->action = operation; if (ospf->t_external_aggr) { - if (ospf->aggr_action == OSPF_ROUTE_AGGR_ADD) { - + if (ospf->aggr_action == OSPF_ROUTE_AGGR_ADD || operation != OSPF_ROUTE_AGGR_ADD) { if (IS_DEBUG_OSPF(lsa, EXTNL_LSA_AGGR)) zlog_debug("%s: Not required to restart timer,set is already added.", __func__); diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c index 27528f6594..95e8b179d8 100644 --- a/ospfd/ospf_vty.c +++ b/ospfd/ospf_vty.c @@ -10258,8 +10258,10 @@ DEFUN (ospf_external_route_aggregation, tag = strtoul(argv[idx + 2]->arg, NULL, 10); ret = ospf_asbr_external_aggregator_set(ospf, &p, tag); - if (ret == OSPF_INVALID) - vty_out(vty, "Invalid configuration!!\n"); + if (ret == OSPF_FAILURE) { + vty_out(vty, "%% Failed to set summary-address!\n"); + return CMD_WARNING_CONFIG_FAILED; + } return CMD_SUCCESS; } @@ -10611,8 +10613,10 @@ DEFUN (ospf_external_route_aggregation_no_adrvertise, } ret = ospf_asbr_external_rt_no_advertise(ospf, &p); - if (ret == OSPF_INVALID) - vty_out(vty, "Invalid configuration!!\n"); + if (ret == OSPF_FAILURE) { + vty_out(vty, "%% Failed to set summary-address!\n"); + return CMD_WARNING_CONFIG_FAILED; + } return CMD_SUCCESS; } diff --git a/ospfd/ospf_zebra.c b/ospfd/ospf_zebra.c index b718d498ae..f45135f44f 100644 --- a/ospfd/ospf_zebra.c +++ b/ospfd/ospf_zebra.c @@ -304,6 +304,27 @@ void ospf_zebra_add(struct ospf *ospf, struct prefix_ipv4 *p, if (api.nexthop_num >= ospf->max_multipath) break; + /* + * Prune duplicate next-hops from the route that is + * installed in the zebra IP route table. OSPF Intra-Area + * routes never have duplicates. 
+ */ + if (or->path_type != OSPF_PATH_INTRA_AREA) { + struct zapi_nexthop *api_nh = &api.nexthops[0]; + unsigned int nh_index; + bool duplicate_next_hop = false; + + for (nh_index = 0; nh_index < api.nexthop_num; api_nh++, nh_index++) { + if (IPV4_ADDR_SAME(&api_nh->gate.ipv4, &path->nexthop) && + (api_nh->ifindex == path->ifindex)) { + duplicate_next_hop = true; + break; + } + } + if (duplicate_next_hop) + continue; + } + ospf_zebra_add_nexthop(ospf, path, &api); if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) { diff --git a/pimd/pim_autorp.c b/pimd/pim_autorp.c index caed914a87..dc077dbbd6 100644 --- a/pimd/pim_autorp.c +++ b/pimd/pim_autorp.c @@ -113,6 +113,12 @@ static void pim_autorp_free(struct pim_autorp *autorp) XFREE(MTYPE_PIM_AUTORP_ANNOUNCE, autorp->announce_pkt); } +static bool pim_autorp_should_close(struct pim_autorp *autorp) +{ + /* If discovery or mapping agent is active, then we need the socket open */ + return !autorp->do_discovery && !autorp->send_rp_discovery; +} + static bool pim_autorp_join_groups(struct interface *ifp) { struct pim_interface *pim_ifp; @@ -670,10 +676,19 @@ static void autorp_send_discovery(struct event *evt) &(autorp->send_discovery_timer)); } +static bool pim_autorp_socket_enable(struct pim_autorp *autorp); +static bool pim_autorp_socket_disable(struct pim_autorp *autorp); + static void autorp_send_discovery_on(struct pim_autorp *autorp) { int interval = 5; + /* Make sure the socket is open and ready */ + if (!pim_autorp_socket_enable(autorp)) { + zlog_err("%s: AutoRP failed to open socket", __func__); + return; + } + /* Send the first discovery shortly after being enabled. * If the configured interval is less than 5 seconds, then just use that. */ @@ -695,6 +710,10 @@ static void autorp_send_discovery_off(struct pim_autorp *autorp) if (PIM_DEBUG_AUTORP) zlog_debug("%s: AutoRP discovery sending disabled", __func__); event_cancel(&(autorp->send_discovery_timer)); + + /* Close the socket if we need to */ + if (pim_autorp_should_close(autorp) && !pim_autorp_socket_disable(autorp)) + zlog_warn("%s: AutoRP failed to close socket", __func__); } static bool autorp_recv_discovery(struct pim_autorp *autorp, uint8_t rpcnt, uint16_t holdtime, @@ -949,6 +968,10 @@ static bool pim_autorp_socket_enable(struct pim_autorp *autorp) { int fd; + /* Return early if socket is already enabled */ + if (autorp->sock != -1) + return true; + frr_with_privs (&pimd_privs) { fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP); if (fd < 0) { @@ -975,6 +998,10 @@ static bool pim_autorp_socket_enable(struct pim_autorp *autorp) static bool pim_autorp_socket_disable(struct pim_autorp *autorp) { + /* Return early if socket is already disabled */ + if (autorp->sock == -1) + return true; + if (close(autorp->sock)) { zlog_warn("Failure closing autorp socket: fd=%d errno=%d: %s", autorp->sock, errno, safe_strerror(errno)); @@ -1453,6 +1480,12 @@ void pim_autorp_start_discovery(struct pim_instance *pim) struct interface *ifp; struct pim_autorp *autorp = pim->autorp; + /* Make sure the socket is open and ready */ + if (!pim_autorp_socket_enable(autorp)) { + zlog_err("%s: AutoRP failed to open socket", __func__); + return; + } + if (!autorp->do_discovery) { autorp->do_discovery = true; autorp_read_on(autorp); @@ -1482,6 +1515,10 @@ void pim_autorp_stop_discovery(struct pim_instance *pim) if (PIM_DEBUG_AUTORP) zlog_debug("%s: AutoRP Discovery stopped", __func__); } + + /* Close the socket if we need to */ + if (pim_autorp_should_close(autorp) && !pim_autorp_socket_disable(autorp)) + zlog_warn("%s: 
AutoRP failed to close socket", __func__); } void pim_autorp_init(struct pim_instance *pim) @@ -1510,12 +1547,6 @@ void pim_autorp_init(struct pim_instance *pim) pim->autorp = autorp; - if (!pim_autorp_socket_enable(autorp)) { - zlog_warn("%s: AutoRP failed to initialize, feature will not work correctly", - __func__); - return; - } - if (PIM_DEBUG_AUTORP) zlog_debug("%s: AutoRP Initialized", __func__); diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c index 6c4d649235..50fe543b23 100644 --- a/pimd/pim_bsm.c +++ b/pimd/pim_bsm.c @@ -354,24 +354,29 @@ static void pim_on_g2rp_timer(struct event *t) bsrp = EVENT_ARG(t); EVENT_OFF(bsrp->g2rp_timer); bsgrp_node = bsrp->bsgrp_node; - - /* elapse time is the hold time of expired node */ - elapse = bsrp->rp_holdtime; + pim = bsgrp_node->scope->pim; bsrp_addr = bsrp->rp_address; - /* update elapse for all bsrp nodes */ - frr_each_safe (bsm_rpinfos, bsgrp_node->bsrp_list, bsrp_node) { - bsrp_node->elapse_time += elapse; - - if (is_hold_time_elapsed(bsrp_node)) { - bsm_rpinfos_del(bsgrp_node->bsrp_list, bsrp_node); - pim_bsm_rpinfo_free(bsrp_node); + /* + * Update elapse for all bsrp nodes except on the BSR itself. + * The timer is meant to remove any bsr RPs learned from the BSR that + * we don't hear from anymore. on the BSR itself, no need to do this. + */ + if (pim->global_scope.state != BSR_ELECTED) { + /* elapse time is the hold time of expired node */ + elapse = bsrp->rp_holdtime; + frr_each_safe (bsm_rpinfos, bsgrp_node->bsrp_list, bsrp_node) { + bsrp_node->elapse_time += elapse; + + if (is_hold_time_elapsed(bsrp_node)) { + bsm_rpinfos_del(bsgrp_node->bsrp_list, bsrp_node); + pim_bsm_rpinfo_free(bsrp_node); + } } } /* Get the next elected rp node */ bsrp = bsm_rpinfos_first(bsgrp_node->bsrp_list); - pim = bsgrp_node->scope->pim; rn = route_node_lookup(pim->rp_table, &bsgrp_node->group); if (!rn) { @@ -386,7 +391,7 @@ static void pim_on_g2rp_timer(struct event *t) return; } - if (rp_info->rp_src != RP_SRC_STATIC) { + if (rp_info->rp_src == RP_SRC_BSR) { /* If new rp available, change it else delete the existing */ if (bsrp) { pim_g2rp_timer_start( @@ -2165,6 +2170,7 @@ static void cand_addrsel_config_write(struct vty *vty, int pim_cand_config_write(struct pim_instance *pim, struct vty *vty) { struct bsm_scope *scope = &pim->global_scope; + struct cand_rp_group *group; int ret = 0; if (scope->cand_rp_addrsel.cfg_enable) { @@ -2176,14 +2182,11 @@ int pim_cand_config_write(struct pim_instance *pim, struct vty *vty) cand_addrsel_config_write(vty, &scope->cand_rp_addrsel); vty_out(vty, "\n"); ret++; + } - struct cand_rp_group *group; - - frr_each (cand_rp_groups, scope->cand_rp_groups, group) { - vty_out(vty, " bsr candidate-rp group %pFX\n", - &group->p); - ret++; - } + frr_each (cand_rp_groups, scope->cand_rp_groups, group) { + vty_out(vty, " bsr candidate-rp group %pFX\n", &group->p); + ret++; } if (scope->bsr_addrsel.cfg_enable) { diff --git a/pimd/pim_bsr_rpdb.c b/pimd/pim_bsr_rpdb.c index 02e7a69ff1..860009312d 100644 --- a/pimd/pim_bsr_rpdb.c +++ b/pimd/pim_bsr_rpdb.c @@ -417,7 +417,7 @@ void pim_crp_nht_update(struct pim_instance *pim, struct pim_nexthop_cache *pnc) rp = bsr_crp_rps_find(scope->ebsr_rps, &ref); assertf(rp, "addr=%pPA", &ref.addr); - ok = pim_nht_pnc_is_valid(pim, pnc); + ok = pim_nht_pnc_is_valid(pim, pnc, PIMADDR_ANY); if (ok == rp->nht_ok) return; diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index a34fb344fe..a1ad261869 100644 --- a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -3296,7 +3296,7 @@ DEFUN (show_ip_rib, return 
CMD_WARNING; } - if (!pim_nht_lookup(vrf->info, &nexthop, addr, 0)) { + if (!pim_nht_lookup(vrf->info, &nexthop, addr, PIMADDR_ANY, false)) { vty_out(vty, "Failure querying RIB nexthop for unicast address %s\n", addr_str); @@ -8878,21 +8878,31 @@ done: } DEFPY_YANG(pim_rpf_lookup_mode, pim_rpf_lookup_mode_cmd, - "[no] rpf-lookup-mode ![urib-only|mrib-only|mrib-then-urib|lower-distance|longer-prefix]$mode", + "[no] rpf-lookup-mode\ + ![urib-only|mrib-only|mrib-then-urib|lower-distance|longer-prefix]$mode\ + [{group-list PREFIX_LIST$grp_list|source-list PREFIX_LIST$src_list}]", NO_STR "RPF lookup behavior\n" "Lookup in unicast RIB only\n" "Lookup in multicast RIB only\n" "Try multicast RIB first, fall back to unicast RIB\n" "Lookup both, use entry with lower distance\n" - "Lookup both, use entry with longer prefix\n") -{ - if (no) - nb_cli_enqueue_change(vty, "./mcast-rpf-lookup", NB_OP_DESTROY, NULL); - else - nb_cli_enqueue_change(vty, "./mcast-rpf-lookup", NB_OP_MODIFY, mode); + "Lookup both, use entry with longer prefix\n" + "Set a specific mode matching group\n" + "Multicast group prefix list\n" + "Set a specific mode matching source address\n" + "Source address prefix list\n") +{ + if (no) { + nb_cli_enqueue_change(vty, "./mode", NB_OP_DESTROY, NULL); + nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL); + } else { + nb_cli_enqueue_change(vty, ".", NB_OP_CREATE, NULL); + nb_cli_enqueue_change(vty, "./mode", NB_OP_MODIFY, mode); + } - return nb_cli_apply_changes(vty, NULL); + return nb_cli_apply_changes(vty, "./mcast-rpf-lookup[group-list='%s'][source-list='%s']", + (grp_list ? grp_list : ""), (src_list ? src_list : "")); } struct cmd_node pim_node = { diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c index 8aebce7d27..9f09852a94 100644 --- a/pimd/pim_cmd_common.c +++ b/pimd/pim_cmd_common.c @@ -608,26 +608,14 @@ int pim_process_no_rp_plist_cmd(struct vty *vty, const char *rp_str, int pim_process_autorp_cmd(struct vty *vty) { - char xpath[XPATH_MAXLEN]; - - snprintf(xpath, sizeof(xpath), "%s/%s", FRR_PIM_AUTORP_XPATH, - "discovery-enabled"); - - nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, "true"); - - return nb_cli_apply_changes(vty, NULL); + nb_cli_enqueue_change(vty, "./discovery-enabled", NB_OP_MODIFY, "true"); + return nb_cli_apply_changes(vty, "%s", FRR_PIM_AUTORP_XPATH); } int pim_process_no_autorp_cmd(struct vty *vty) { - char xpath[XPATH_MAXLEN]; - - snprintf(xpath, sizeof(xpath), "%s/%s", FRR_PIM_AUTORP_XPATH, - "discovery-enabled"); - - nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); - - return nb_cli_apply_changes(vty, NULL); + nb_cli_enqueue_change(vty, "./discovery-enabled", NB_OP_MODIFY, "false"); + return nb_cli_apply_changes(vty, "%s", FRR_PIM_AUTORP_XPATH); } int pim_process_autorp_candidate_rp_cmd(struct vty *vty, bool no, const char *rpaddr_str, diff --git a/pimd/pim_igmp_mtrace.c b/pimd/pim_igmp_mtrace.c index ad6f265101..f0fbb2bbf6 100644 --- a/pimd/pim_igmp_mtrace.c +++ b/pimd/pim_igmp_mtrace.c @@ -59,7 +59,8 @@ static bool mtrace_fwd_info_weak(struct pim_instance *pim, memset(&nexthop, 0, sizeof(nexthop)); - if (!pim_nht_lookup(pim, &nexthop, mtracep->src_addr, 1)) { + /* TODO Is there any valid group address to use for lookup? 
*/ + if (!pim_nht_lookup(pim, &nexthop, mtracep->src_addr, PIMADDR_ANY, true)) { if (PIM_DEBUG_MTRACE) zlog_debug("mtrace not found neighbor"); return false; @@ -354,7 +355,8 @@ static int mtrace_un_forward_packet(struct pim_instance *pim, struct ip *ip_hdr, if (interface == NULL) { memset(&nexthop, 0, sizeof(nexthop)); - if (!pim_nht_lookup(pim, &nexthop, ip_hdr->ip_dst, 0)) { + /* TODO Is there any valid group address to use for lookup? */ + if (!pim_nht_lookup(pim, &nexthop, ip_hdr->ip_dst, PIMADDR_ANY, false)) { if (PIM_DEBUG_MTRACE) zlog_debug( "Dropping mtrace packet, no route to destination"); @@ -535,8 +537,11 @@ static int mtrace_send_response(struct pim_instance *pim, zlog_debug("mtrace response to RP"); } else { memset(&nexthop, 0, sizeof(nexthop)); - /* TODO: should use unicast rib lookup */ - if (!pim_nht_lookup(pim, &nexthop, mtracep->rsp_addr, 1)) { + /* TODO: should use unicast rib lookup + * NEB 10/30/24 - Not sure why this needs the unicast rib...right now it will look up per the rpf mode + * Are any of the igmp_mtrace addresses a valid group address to use for lookups?? + */ + if (!pim_nht_lookup(pim, &nexthop, mtracep->rsp_addr, PIMADDR_ANY, true)) { if (PIM_DEBUG_MTRACE) zlog_debug( "Dropped response qid=%ud, no route to response address", diff --git a/pimd/pim_instance.h b/pimd/pim_instance.h index 7f022111bc..7aa9d857d4 100644 --- a/pimd/pim_instance.h +++ b/pimd/pim_instance.h @@ -18,6 +18,7 @@ #include "pim_upstream.h" #include "pim_mroute.h" #include "pim_autorp.h" +#include "pim_nht.h" enum pim_spt_switchover { PIM_SPT_IMMEDIATE, @@ -116,7 +117,7 @@ struct pim_instance { char *register_plist; struct hash *nht_hash; - enum pim_rpf_lookup_mode rpf_mode; + struct pim_lookup_mode_head rpf_mode; void *ssm_info; /* per-vrf SSM configuration */ diff --git a/pimd/pim_mroute.c b/pimd/pim_mroute.c index 93bdd8dac9..6c13e1324f 100644 --- a/pimd/pim_mroute.c +++ b/pimd/pim_mroute.c @@ -567,7 +567,8 @@ int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf, * setting the SPTBIT to true */ if (!(pim_addr_is_any(up->upstream_register)) && - pim_nht_lookup(pim_ifp->pim, &source, up->upstream_register, 0)) { + pim_nht_lookup(pim_ifp->pim, &source, up->upstream_register, up->sg.grp, + false)) { pim_register_stop_send(source.interface, &sg, pim_ifp->primary_address, up->upstream_register); @@ -580,7 +581,8 @@ int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf, __func__); } else { if (I_am_RP(pim_ifp->pim, up->sg.grp)) { - if (pim_nht_lookup(pim_ifp->pim, &source, up->upstream_register, 0)) + if (pim_nht_lookup(pim_ifp->pim, &source, up->upstream_register, + up->sg.grp, false)) pim_register_stop_send( source.interface, &sg, pim_ifp->primary_address, diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c index 5e5ee5e91f..46d5f4881f 100644 --- a/pimd/pim_msdp.c +++ b/pimd/pim_msdp.c @@ -706,7 +706,7 @@ bool pim_msdp_peer_rpf_check(struct pim_msdp_peer *mp, struct in_addr rp) } /* check if the MSDP peer is the nexthop for the RP */ - if (pim_nht_lookup(mp->pim, &nexthop, rp, 0) && + if (pim_nht_lookup(mp->pim, &nexthop, rp, PIMADDR_ANY, false) && nexthop.mrib_nexthop_addr.s_addr == mp->peer.s_addr) { return true; } diff --git a/pimd/pim_nb.c b/pimd/pim_nb.c index b55541b810..ea9ce3cecb 100644 --- a/pimd/pim_nb.c +++ b/pimd/pim_nb.c @@ -266,7 +266,14 @@ const struct frr_yang_module_info frr_pim_info = { { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mcast-rpf-lookup", .cbs = { - .modify 
= routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_modify, + .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_create, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mcast-rpf-lookup/mode", + .cbs = { + .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_mode_modify } }, { diff --git a/pimd/pim_nb.h b/pimd/pim_nb.h index a5ef6ad60a..a15c6e6d9f 100644 --- a/pimd/pim_nb.h +++ b/pimd/pim_nb.h @@ -102,7 +102,11 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_re struct nb_cb_modify_args *args); int routing_control_plane_protocols_control_plane_protocol_pim_address_family_register_accept_list_destroy( struct nb_cb_destroy_args *args); -int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_modify( +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_create( + struct nb_cb_create_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_destroy( + struct nb_cb_destroy_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_mode_modify( struct nb_cb_modify_args *args); int lib_interface_pim_address_family_dr_priority_modify( struct nb_cb_modify_args *args); diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c index b55d08bab9..51f0615884 100644 --- a/pimd/pim_nb_config.c +++ b/pimd/pim_nb_config.c @@ -1895,12 +1895,25 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_re /* * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mcast-rpf-lookup */ -int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_modify( - struct nb_cb_modify_args *args) +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_create( + struct nb_cb_create_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + break; + } + + return NB_OK; +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_destroy( + struct nb_cb_destroy_args *args) { struct vrf *vrf; struct pim_instance *pim; - enum pim_rpf_lookup_mode old_mode; switch (args->event) { case NB_EV_VALIDATE: @@ -1910,15 +1923,37 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mc case NB_EV_APPLY: vrf = nb_running_get_entry(args->dnode, NULL, true); pim = vrf->info; - old_mode = pim->rpf_mode; - pim->rpf_mode = yang_dnode_get_enum(args->dnode, NULL); + pim_nht_change_rpf_mode(pim, yang_dnode_get_string(args->dnode, "group-list"), + yang_dnode_get_string(args->dnode, "source-list"), + MCAST_NO_CONFIG); + break; + } - if (pim->rpf_mode != old_mode && - /* MCAST_MIX_MRIB_FIRST is the default if not configured */ - (old_mode != MCAST_NO_CONFIG && pim->rpf_mode != MCAST_MIX_MRIB_FIRST)) { - pim_nht_mode_changed(pim); - } + return NB_OK; +} + +/* + * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mcast-rpf-lookup/mode + */ +int 
routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_mode_modify( + struct nb_cb_modify_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + enum pim_rpf_lookup_mode mode = MCAST_NO_CONFIG; + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + mode = yang_dnode_get_enum(args->dnode, NULL); + pim_nht_change_rpf_mode(pim, yang_dnode_get_string(args->dnode, "../group-list"), + yang_dnode_get_string(args->dnode, "../source-list"), mode); break; } diff --git a/pimd/pim_nht.c b/pimd/pim_nht.c index 00ab46b4cd..1e9ea24b26 100644 --- a/pimd/pim_nht.c +++ b/pimd/pim_nht.c @@ -34,6 +34,176 @@ #include "pim_register.h" #include "pim_vxlan.h" +DEFINE_MTYPE_STATIC(PIMD, PIM_LOOKUP_MODE, "PIM RPF lookup mode"); +DEFINE_MTYPE_STATIC(PIMD, PIM_LOOKUP_MODE_STR, "PIM RPF lookup mode prefix list string"); + +static void pim_update_rp_nh(struct pim_instance *pim, struct pim_nexthop_cache *pnc); +static int pim_update_upstream_nh(struct pim_instance *pim, struct pim_nexthop_cache *pnc); + +static int pim_lookup_mode_cmp(const struct pim_lookup_mode *l, const struct pim_lookup_mode *r) +{ + /* Let's just sort anything with both lists set above those with only one list set, + * which is above the global where neither are set + */ + + /* Both are set on right, either lower or equal */ + if (l->grp_plist != NULL && l->src_plist != NULL) + return (r->grp_plist == NULL || r->src_plist == NULL) ? -1 : 0; + + /* Only one set on the left */ + if (!(l->grp_plist == NULL && l->src_plist == NULL)) { + /* Lower only if both are not set on right */ + if (r->grp_plist == NULL && r->src_plist == NULL) + return -1; + /* Higher only if both are set on right */ + if (r->grp_plist != NULL && r->src_plist != NULL) + return 1; + /* Otherwise both sides have at least one set, so equal */ + return 0; + } + + /* Neither set on left, so equal if neither set on right also */ + if (r->grp_plist == NULL && r->src_plist == NULL) + return 0; + + /* Otherwise higher */ + return 1; +} + +DECLARE_SORTLIST_NONUNIQ(pim_lookup_mode, struct pim_lookup_mode, list, pim_lookup_mode_cmp); + +static void pim_lookup_mode_free(struct pim_lookup_mode *m) +{ + if (m->grp_plist) + XFREE(MTYPE_PIM_LOOKUP_MODE_STR, m->grp_plist); + if (m->src_plist) + XFREE(MTYPE_PIM_LOOKUP_MODE_STR, m->src_plist); + XFREE(MTYPE_PIM_LOOKUP_MODE, m); +} + +static void pim_lookup_mode_list_free(struct pim_lookup_mode_head *head) +{ + struct pim_lookup_mode *m; + + while ((m = pim_lookup_mode_pop(head))) + pim_lookup_mode_free(m); +} + +enum pim_rpf_lookup_mode pim_get_lookup_mode(struct pim_instance *pim, pim_addr group, + pim_addr source) +{ + struct pim_lookup_mode *m; + struct prefix_list *plist; + struct prefix p; + + frr_each_safe (pim_lookup_mode, &(pim->rpf_mode), m) { + if (!pim_addr_is_any(group) && m->grp_plist) { + /* Match group against plist, continue if no match */ + plist = prefix_list_lookup(PIM_AFI, m->grp_plist); + if (plist == NULL) + continue; + pim_addr_to_prefix(&p, group); + if (prefix_list_apply(plist, &p) == PREFIX_DENY) + continue; + } + + if (!pim_addr_is_any(source) && m->src_plist) { + /* Match source against plist, continue if no match */ + plist = prefix_list_lookup(PIM_AFI, m->src_plist); + if (plist == NULL) + continue; + pim_addr_to_prefix(&p, source); + if (prefix_list_apply(plist, &p) == PREFIX_DENY) + continue; + } + + /* If lookup mode has a group 
list, but no group is provided, don't match it */ + if (pim_addr_is_any(group) && m->grp_plist) + continue; + + /* If lookup mode has a source list, but no source is provided, don't match it */ + if (pim_addr_is_any(source) && m->src_plist) + continue; + + /* Match found */ + return m->mode; + } + + /* This shouldn't happen since we have the global mode, but if it's gone, + * just return the default of no config + */ + if (PIM_DEBUG_PIM_NHT) + zlog_debug("%s: No RPF lookup matched for given group %pPA and source %pPA", + __func__, &group, &source); + + return MCAST_NO_CONFIG; +} + +static bool pim_rpf_mode_changed(enum pim_rpf_lookup_mode old, enum pim_rpf_lookup_mode new) +{ + if (old != new) { + /* These two are equivalent, so don't update in that case */ + if (old == MCAST_NO_CONFIG && new == MCAST_MIX_MRIB_FIRST) + return false; + if (old == MCAST_MIX_MRIB_FIRST && new == MCAST_NO_CONFIG) + return false; + return true; + } + return false; +} + +struct pnc_mode_update_hash_walk_data { + struct pim_instance *pim; + struct prefix_list *grp_plist; + struct prefix_list *src_plist; +}; + +static int pim_nht_hash_mode_update_helper(struct hash_bucket *bucket, void *arg) +{ + struct pim_nexthop_cache *pnc = bucket->data; + struct pnc_mode_update_hash_walk_data *pwd = arg; + struct pim_instance *pim = pwd->pim; + struct prefix p; + + pim_addr_to_prefix(&p, pnc->addr); + + /* Make sure this pnc entry matches the prefix lists */ + /* TODO: For now, pnc only has the source address, so we can only check that */ + if (pwd->src_plist && + (pim_addr_is_any(pnc->addr) || prefix_list_apply(pwd->src_plist, &p) == PREFIX_DENY)) + return HASHWALK_CONTINUE; + + /* Otherwise the address is any, or matches the prefix list, or no prefix list to match, so do the updates */ + /* TODO for RP, there are groups....but I don't think we'd want to use those */ + if (listcount(pnc->rp_list)) + pim_update_rp_nh(pim, pnc); + + /* TODO for upstream, there is an S,G key...can/should we use that group?? */ + if (pnc->upstream_hash->count) + pim_update_upstream_nh(pim, pnc); + + if (pnc->candrp_count) + pim_crp_nht_update(pim, pnc); + + return HASHWALK_CONTINUE; +} + +static void pim_rpf_mode_changed_update(struct pim_instance *pim, const char *group_plist, + const char *source_plist) +{ + struct pnc_mode_update_hash_walk_data pwd; + + /* Update the refresh time to force new lookups if needed */ + pim_rpf_set_refresh_time(pim); + + /* Force update the registered RP and upstreams for all cache entries */ + pwd.pim = pim; + pwd.grp_plist = prefix_list_lookup(PIM_AFI, group_plist); + pwd.src_plist = prefix_list_lookup(PIM_AFI, source_plist); + + hash_walk(pim->nht_hash, pim_nht_hash_mode_update_helper, &pwd); +} + /** * pim_sendmsg_zebra_rnh -- Format and send a nexthop register/Unregister * command to Zebra. 
@@ -106,9 +276,10 @@ static struct pim_nexthop_cache *pim_nexthop_cache_add(struct pim_instance *pim, return pnc; } -static bool pim_nht_pnc_has_answer(struct pim_instance *pim, struct pim_nexthop_cache *pnc) +static bool pim_nht_pnc_has_answer(struct pim_instance *pim, struct pim_nexthop_cache *pnc, + pim_addr group) { - switch (pim->rpf_mode) { + switch (pim_get_lookup_mode(pim, group, pnc->addr)) { case MCAST_MRIB_ONLY: return CHECK_FLAG(pnc->mrib.flags, PIM_NEXTHOP_ANSWER_RECEIVED); @@ -133,25 +304,28 @@ static bool pim_nht_pnc_has_answer(struct pim_instance *pim, struct pim_nexthop_ } static struct pim_nexthop_cache_rib *pim_pnc_get_rib(struct pim_instance *pim, - struct pim_nexthop_cache *pnc) + struct pim_nexthop_cache *pnc, pim_addr group) { struct pim_nexthop_cache_rib *pnc_rib = NULL; + enum pim_rpf_lookup_mode mode; - if (pim->rpf_mode == MCAST_MRIB_ONLY) + mode = pim_get_lookup_mode(pim, group, pnc->addr); + + if (mode == MCAST_MRIB_ONLY) pnc_rib = &pnc->mrib; - else if (pim->rpf_mode == MCAST_URIB_ONLY) + else if (mode == MCAST_URIB_ONLY) pnc_rib = &pnc->urib; - else if (pim->rpf_mode == MCAST_MIX_MRIB_FIRST || pim->rpf_mode == MCAST_NO_CONFIG) { + else if (mode == MCAST_MIX_MRIB_FIRST || mode == MCAST_NO_CONFIG) { if (pnc->mrib.nexthop_num > 0) pnc_rib = &pnc->mrib; else pnc_rib = &pnc->urib; - } else if (pim->rpf_mode == MCAST_MIX_DISTANCE) { + } else if (mode == MCAST_MIX_DISTANCE) { if (pnc->mrib.distance <= pnc->urib.distance) pnc_rib = &pnc->mrib; else pnc_rib = &pnc->urib; - } else if (pim->rpf_mode == MCAST_MIX_PFXLEN) { + } else if (mode == MCAST_MIX_PFXLEN) { if (pnc->mrib.prefix_len >= pnc->urib.prefix_len) pnc_rib = &pnc->mrib; else @@ -161,9 +335,151 @@ static struct pim_nexthop_cache_rib *pim_pnc_get_rib(struct pim_instance *pim, return pnc_rib; } -bool pim_nht_pnc_is_valid(struct pim_instance *pim, struct pim_nexthop_cache *pnc) +void pim_nht_change_rpf_mode(struct pim_instance *pim, const char *group_plist, + const char *source_plist, enum pim_rpf_lookup_mode mode) +{ + struct pim_lookup_mode *m; + bool found = false; + bool update = false; + const char *glist = NULL; + const char *slist = NULL; + + /* Prefix lists may be passed in as empty string, leave them NULL instead */ + if (group_plist && strlen(group_plist)) + glist = group_plist; + if (source_plist && strlen(source_plist)) + slist = source_plist; + + frr_each_safe (pim_lookup_mode, &(pim->rpf_mode), m) { + if ((m->grp_plist && glist && strmatch(m->grp_plist, glist)) && + (m->src_plist && slist && strmatch(m->src_plist, slist))) { + /* Group and source plists are both set and matched */ + found = true; + if (mode == MCAST_NO_CONFIG) { + /* MCAST_NO_CONFIG means we should remove this lookup mode + * We don't know what other modes might match, or if only the global, so we need to + * update all lookups + */ + pim_lookup_mode_del(&pim->rpf_mode, m); + pim_lookup_mode_free(m); + glist = NULL; + slist = NULL; + update = true; + } else { + /* Just changing mode */ + update = pim_rpf_mode_changed(m->mode, mode); + m->mode = mode; /* Always make sure the mode is set, even if not updating */ + } + + if (update) + pim_rpf_mode_changed_update(pim, glist, slist); + break; + } + + if ((m->grp_plist && glist && strmatch(m->grp_plist, glist)) && + (!m->src_plist && !slist)) { + /* Only group list set and matched */ + found = true; + if (mode == MCAST_NO_CONFIG) { + /* MCAST_NO_CONFIG means we should remove this lookup mode + * We don't know what other modes might match, or if only the global, so we need to + * update 
all lookups + */ + pim_lookup_mode_del(&pim->rpf_mode, m); + pim_lookup_mode_free(m); + glist = NULL; + slist = NULL; + update = true; + } else { + /* Just changing mode */ + update = pim_rpf_mode_changed(m->mode, mode); + m->mode = mode; /* Always make sure the mode is set, even if not updating */ + } + + if (update) + pim_rpf_mode_changed_update(pim, glist, slist); + break; + } + + if ((!m->grp_plist && !glist) && + (m->src_plist && slist && strmatch(m->src_plist, slist))) { + /* Only source list set and matched */ + found = true; + if (mode == MCAST_NO_CONFIG) { + /* MCAST_NO_CONFIG means we should remove this lookup mode + * We don't know what other modes might match, or if only the global, so we need to + * update all lookups + */ + pim_lookup_mode_del(&pim->rpf_mode, m); + pim_lookup_mode_free(m); + glist = NULL; + slist = NULL; + update = true; + } else { + /* Just changing mode */ + update = pim_rpf_mode_changed(m->mode, mode); + m->mode = mode; /* Always make sure the mode is set, even if not updating */ + } + + if (update) + pim_rpf_mode_changed_update(pim, glist, slist); + break; + } + + if (!m->grp_plist && !glist && !m->src_plist && !slist) { + /* No prefix lists set, so this is the global mode */ + /* We never delete this mode, even when set back to MCAST_NO_CONFIG */ + update = pim_rpf_mode_changed(m->mode, mode); + m->mode = mode; /* Always make sure the mode is set, even if not updating */ + if (update) + pim_rpf_mode_changed_update(pim, glist, slist); + found = true; + break; + } + } + + if (!found) { + /* Adding a new lookup mode with unique prefix lists, add it */ + m = XCALLOC(MTYPE_PIM_LOOKUP_MODE, sizeof(struct pim_lookup_mode)); + m->grp_plist = XSTRDUP(MTYPE_PIM_LOOKUP_MODE_STR, glist); + m->src_plist = XSTRDUP(MTYPE_PIM_LOOKUP_MODE_STR, slist); + m->mode = mode; + pim_lookup_mode_add(&(pim->rpf_mode), m); + pim_rpf_mode_changed_update(pim, glist, slist); + } +} + +int pim_lookup_mode_write(struct pim_instance *pim, struct vty *vty) { - switch (pim->rpf_mode) { + int writes = 0; + struct pim_lookup_mode *m; + + frr_each_safe (pim_lookup_mode, &(pim->rpf_mode), m) { + if (m->mode == MCAST_NO_CONFIG) + continue; + + ++writes; + vty_out(vty, " rpf-lookup-mode %s", + m->mode == MCAST_URIB_ONLY ? "urib-only" + : m->mode == MCAST_MRIB_ONLY ? "mrib-only" + : m->mode == MCAST_MIX_MRIB_FIRST ? "mrib-then-urib" + : m->mode == MCAST_MIX_DISTANCE ? 
"lower-distance" + : "longer-prefix"); + + if (m->grp_plist) + vty_out(vty, " group-list %s", m->grp_plist); + + if (m->src_plist) + vty_out(vty, " source-list %s", m->src_plist); + + vty_out(vty, "\n"); + } + return writes; +} + +bool pim_nht_pnc_is_valid(struct pim_instance *pim, struct pim_nexthop_cache *pnc, pim_addr group) +{ + switch (pim_get_lookup_mode(pim, group, pnc->addr)) { case MCAST_MRIB_ONLY: return CHECK_FLAG(pnc->mrib.flags, PIM_NEXTHOP_VALID); @@ -275,6 +591,7 @@ bool pim_nht_find_or_track(struct pim_instance *pim, pim_addr addr, struct pim_u { struct pim_nexthop_cache *pnc; struct listnode *ch_node = NULL; + pim_addr group = PIMADDR_ANY; /* This will find the entry and add it to tracking if not found */ pnc = pim_nht_get(pim, addr); @@ -289,10 +606,12 @@ bool pim_nht_find_or_track(struct pim_instance *pim, pim_addr addr, struct pim_u } /* Store the upstream if provided and not currently in the list */ - if (up != NULL) + if (up != NULL) { (void)hash_get(pnc->upstream_hash, up, hash_alloc_intern); + group = up->sg.grp; + } - if (pim_nht_pnc_is_valid(pim, pnc)) { + if (pim_nht_pnc_is_valid(pim, pnc, group)) { if (out_pnc) memcpy(out_pnc, pnc, sizeof(struct pim_nexthop_cache)); return true; @@ -315,7 +634,7 @@ bool pim_nht_candrp_add(struct pim_instance *pim, pim_addr addr) pnc = pim_nht_get(pim, addr); pnc->candrp_count++; - return pim_nht_pnc_is_valid(pim, pnc); + return pim_nht_pnc_is_valid(pim, pnc, PIMADDR_ANY); } static void pim_nht_drop_maybe(struct pim_instance *pim, struct pim_nexthop_cache *pnc) @@ -448,7 +767,7 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr, lookup.addr = bsr_addr; pnc = hash_lookup(pim->nht_hash, &lookup); - if (!pnc || !pim_nht_pnc_has_answer(pim, pnc)) { + if (!pnc || !pim_nht_pnc_has_answer(pim, pnc, PIMADDR_ANY)) { /* BSM from a new freshly registered BSR - do a synchronous * zebra query since otherwise we'd drop the first packet, * leading to additional delay in picking up BSM data @@ -465,9 +784,8 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr, int num_ifindex; memset(nexthop_tab, 0, sizeof(nexthop_tab)); - num_ifindex = zclient_lookup_nexthop( - pim, nexthop_tab, router->multipath, bsr_addr, - PIM_NEXTHOP_LOOKUP_MAX); + num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, bsr_addr, + PIMADDR_ANY, PIM_NEXTHOP_LOOKUP_MAX); if (num_ifindex <= 0) return false; @@ -507,7 +825,7 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr, return false; } - if (pim_nht_pnc_is_valid(pim, pnc)) { + if (pim_nht_pnc_is_valid(pim, pnc, PIMADDR_ANY)) { /* if we accept BSMs from more than one ECMP nexthop, this will cause * BSM message "multiplication" for each ECMP hop. i.e. if you have * 4-way ECMP and 4 hops you end up with 256 copies of each BSM @@ -515,7 +833,7 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr, * * so... only accept the first (IPv4) valid nexthop as source. 
*/ - struct pim_nexthop_cache_rib *rib = pim_pnc_get_rib(pim, pnc); + struct pim_nexthop_cache_rib *rib = pim_pnc_get_rib(pim, pnc, PIMADDR_ANY); for (nh = rib->nexthop; nh; nh = nh->next) { pim_addr nhaddr; @@ -754,14 +1072,17 @@ static bool pim_ecmp_nexthop_search(struct pim_instance *pim, struct pim_nexthop pim_addr nh_addr; pim_addr grp_addr; struct pim_nexthop_cache_rib *rib; + pim_addr group; + + group = pim_addr_from_prefix(grp); /* Early return if required parameters aren't provided */ - if (!pim || !pnc || !pim_nht_pnc_is_valid(pim, pnc) || !nexthop || !grp) + if (!pim || !pnc || !pim_nht_pnc_is_valid(pim, pnc, group) || !nexthop || !grp) return false; nh_addr = nexthop->mrib_nexthop_addr; grp_addr = pim_addr_from_prefix(grp); - rib = pim_pnc_get_rib(pim, pnc); + rib = pim_pnc_get_rib(pim, pnc, group); /* Current Nexthop is VALID, check to stay on the current path. */ if (nexthop->interface && nexthop->interface->info && @@ -934,6 +1255,9 @@ bool pim_nht_lookup_ecmp(struct pim_instance *pim, struct pim_nexthop *nexthop, uint32_t hash_val = 0; uint32_t mod_val = 0; uint32_t num_nbrs = 0; + pim_addr group; + + group = pim_addr_from_prefix(grp); if (PIM_DEBUG_PIM_NHT_DETAIL) zlog_debug("%s: Looking up: %pPA(%s), last lookup time: %lld", __func__, &src, @@ -941,12 +1265,12 @@ bool pim_nht_lookup_ecmp(struct pim_instance *pim, struct pim_nexthop *nexthop, pnc = pim_nexthop_cache_find(pim, src); if (pnc) { - if (pim_nht_pnc_has_answer(pim, pnc)) + if (pim_nht_pnc_has_answer(pim, pnc, group)) return pim_ecmp_nexthop_search(pim, pnc, nexthop, src, grp, neighbor_needed); } memset(nexthop_tab, 0, sizeof(struct pim_zlookup_nexthop) * router->multipath); - num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, src, + num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, src, group, PIM_NEXTHOP_LOOKUP_MAX); if (num_ifindex < 1) { if (PIM_DEBUG_PIM_NHT) @@ -1051,7 +1375,7 @@ bool pim_nht_lookup_ecmp(struct pim_instance *pim, struct pim_nexthop *nexthop, } bool pim_nht_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop, pim_addr addr, - int neighbor_needed) + pim_addr group, bool neighbor_needed) { struct pim_zlookup_nexthop nexthop_tab[router->multipath]; struct pim_neighbor *nbr = NULL; @@ -1087,7 +1411,7 @@ bool pim_nht_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop, pim_a &addr, nexthop->last_lookup_time, pim->last_route_change_time); memset(nexthop_tab, 0, sizeof(struct pim_zlookup_nexthop) * router->multipath); - num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, addr, + num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, addr, group, PIM_NEXTHOP_LOOKUP_MAX); if (num_ifindex < 1) { if (PIM_DEBUG_PIM_NHT) @@ -1349,36 +1673,6 @@ void pim_nexthop_update(struct vrf *vrf, struct prefix *match, struct zapi_route pim_crp_nht_update(pim, pnc); } -static int pim_nht_hash_mode_update_helper(struct hash_bucket *bucket, void *arg) -{ - struct pim_nexthop_cache *pnc = bucket->data; - struct pnc_hash_walk_data *pwd = arg; - struct pim_instance *pim = pwd->pim; - - if (listcount(pnc->rp_list)) - pim_update_rp_nh(pim, pnc); - - if (pnc->upstream_hash->count) - pim_update_upstream_nh(pim, pnc); - - if (pnc->candrp_count) - pim_crp_nht_update(pim, pnc); - - return HASHWALK_CONTINUE; -} - -void pim_nht_mode_changed(struct pim_instance *pim) -{ - struct pnc_hash_walk_data pwd; - - /* Update the refresh time to force new lookups if needed */ - pim_rpf_set_refresh_time(pim); - - /* Force update the 
registered RP and upstreams for all cache entries */ - pwd.pim = pim; - hash_walk(pim->nht_hash, pim_nht_hash_mode_update_helper, &pwd); -} - /* Cleanup pim->nht_hash each node data */ static void pim_nht_hash_clean(void *data) { @@ -1418,11 +1712,19 @@ static bool pim_nht_equal(const void *arg1, const void *arg2) void pim_nht_init(struct pim_instance *pim) { char hash_name[64]; + struct pim_lookup_mode *global_mode; snprintf(hash_name, sizeof(hash_name), "PIM %s NHT Hash", pim->vrf->name); pim->nht_hash = hash_create_size(256, pim_nht_hash_key, pim_nht_equal, hash_name); - pim->rpf_mode = MCAST_NO_CONFIG; + pim_lookup_mode_init(&(pim->rpf_mode)); + + /* Add the default global mode */ + global_mode = XCALLOC(MTYPE_PIM_LOOKUP_MODE, sizeof(*global_mode)); + global_mode->grp_plist = NULL; + global_mode->src_plist = NULL; + global_mode->mode = MCAST_NO_CONFIG; + pim_lookup_mode_add(&(pim->rpf_mode), global_mode); if (PIM_DEBUG_ZEBRA) zlog_debug("%s: NHT hash init: %s ", __func__, hash_name); @@ -1432,4 +1734,7 @@ void pim_nht_terminate(struct pim_instance *pim) { /* Traverse and cleanup nht_hash */ hash_clean_and_free(&pim->nht_hash, (void *)pim_nht_hash_clean); + + pim_lookup_mode_list_free(&(pim->rpf_mode)); + pim_lookup_mode_fini(&(pim->rpf_mode)); } diff --git a/pimd/pim_nht.h b/pimd/pim_nht.h index 144139f406..671fa87202 100644 --- a/pimd/pim_nht.h +++ b/pimd/pim_nht.h @@ -16,6 +16,15 @@ #include "pim_rp.h" #include "pim_rpf.h" +PREDECL_SORTLIST_NONUNIQ(pim_lookup_mode); + +struct pim_lookup_mode { + char *grp_plist; + char *src_plist; + enum pim_rpf_lookup_mode mode; + struct pim_lookup_mode_item list; +}; + /* PIM nexthop cache value structure. */ struct pim_nexthop_cache_rib { /* IGP route's metric. */ @@ -54,8 +63,22 @@ struct pnc_hash_walk_data { struct interface *ifp; }; +/* Find the right lookup mode for the given group and/or source + * either may be ANY (although source should realistically always be provided) + * Find the lookup mode that has matching group and/or source prefix lists, or the global mode. + */ +enum pim_rpf_lookup_mode pim_get_lookup_mode(struct pim_instance *pim, pim_addr group, + pim_addr source); + +/* Change the RPF lookup config, may trigger updates to RP's and Upstreams registered for matching cache entries */ +void pim_nht_change_rpf_mode(struct pim_instance *pim, const char *group_plist, + const char *source_plist, enum pim_rpf_lookup_mode mode); + +/* Write the rpf lookup mode configuration */ +int pim_lookup_mode_write(struct pim_instance *pim, struct vty *vty); + /* Verify that we have nexthop information in the cache entry */ -bool pim_nht_pnc_is_valid(struct pim_instance *pim, struct pim_nexthop_cache *pnc); +bool pim_nht_pnc_is_valid(struct pim_instance *pim, struct pim_nexthop_cache *pnc, pim_addr group); /* Get (or add) the NH cache entry for the given address */ struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim, pim_addr addr); @@ -109,7 +132,7 @@ bool pim_nht_lookup_ecmp(struct pim_instance *pim, struct pim_nexthop *nexthop, * a synchronous lookup. No ECMP decision is made. 
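 *
 * Editorial sketch (illustration only; the caller shown below is assumed and
 * not taken from this change): with the added group argument, a caller
 * resolving an (S,G) would typically pass the group so that any matching
 * group-list/source-list rpf-lookup-mode entry is honoured, e.g.
 *
 *   pim_nht_lookup(pim, &nexthop, up->upstream_addr, up->sg.grp, true);
 *
 * PIMADDR_ANY may be passed when no group context exists, as the candidate-RP
 * and BSR code does. Per-prefix-list modes are written out by
 * pim_lookup_mode_write() as lines of the form
 * "rpf-lookup-mode mrib-only group-list GRP-PLIST source-list SRC-PLIST"
 * (the mode and prefix-list names in this example are hypothetical).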
*/ bool pim_nht_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop, pim_addr addr, - int neighbor_needed); + pim_addr group, bool neighbor_needed); /* Performs a pim_nht_lookup_ecmp and returns the mroute VIF index of the nexthop interface */ int pim_nht_lookup_ecmp_if_vif_index(struct pim_instance *pim, pim_addr src, struct prefix *grp); @@ -117,9 +140,6 @@ int pim_nht_lookup_ecmp_if_vif_index(struct pim_instance *pim, pim_addr src, str /* Tracked nexthop update from zebra */ void pim_nexthop_update(struct vrf *vrf, struct prefix *match, struct zapi_route *nhr); -/* RPF lookup mode changed via configuration */ -void pim_nht_mode_changed(struct pim_instance *pim); - /* NHT init and finish funcitons */ void pim_nht_init(struct pim_instance *pim); void pim_nht_terminate(struct pim_instance *pim); diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c index 974cf30cf1..a972a38c72 100644 --- a/pimd/pim_vty.c +++ b/pimd/pim_vty.c @@ -29,6 +29,7 @@ #include "pim_bfd.h" #include "pim_bsm.h" #include "pim_vxlan.h" +#include "pim_nht.h" #include "pim6_mld.h" int pim_debug_config_write(struct vty *vty) @@ -275,15 +276,7 @@ int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty) } } - if (pim->rpf_mode != MCAST_NO_CONFIG) { - ++writes; - vty_out(vty, " rpf-lookup-mode %s\n", - pim->rpf_mode == MCAST_URIB_ONLY ? "urib-only" - : pim->rpf_mode == MCAST_MRIB_ONLY ? "mrib-only" - : pim->rpf_mode == MCAST_MIX_MRIB_FIRST ? "mrib-then-urib" - : pim->rpf_mode == MCAST_MIX_DISTANCE ? "lower-distance" - : "longer-prefix"); - } + writes += pim_lookup_mode_write(pim, vty); return writes; } diff --git a/pimd/pim_zlookup.c b/pimd/pim_zlookup.c index febc595ad4..4ffb5bac17 100644 --- a/pimd/pim_zlookup.c +++ b/pimd/pim_zlookup.c @@ -375,12 +375,16 @@ static int zclient_rib_lookup(struct pim_instance *pim, struct pim_zlookup_nexth static int zclient_lookup_nexthop_once(struct pim_instance *pim, struct pim_zlookup_nexthop nexthop_tab[], const int tab_size, - pim_addr addr) + pim_addr addr, pim_addr group) { - if (pim->rpf_mode == MCAST_MRIB_ONLY) + enum pim_rpf_lookup_mode mode; + + mode = pim_get_lookup_mode(pim, group, addr); + + if (mode == MCAST_MRIB_ONLY) return zclient_rib_lookup(pim, nexthop_tab, tab_size, addr, SAFI_MULTICAST); - if (pim->rpf_mode == MCAST_URIB_ONLY) + if (mode == MCAST_URIB_ONLY) return zclient_rib_lookup(pim, nexthop_tab, tab_size, addr, SAFI_UNICAST); /* All other modes require looking up both tables and making a choice */ @@ -420,15 +424,14 @@ static int zclient_lookup_nexthop_once(struct pim_instance *pim, /* Both tables have results, so compare them. 
Distance and prefix length are the same for all * nexthops, so only compare the first in the list */ - if (pim->rpf_mode == MCAST_MIX_DISTANCE && + if (mode == MCAST_MIX_DISTANCE && mrib_tab[0].protocol_distance > urib_tab[0].protocol_distance) { if (PIM_DEBUG_PIM_NHT_DETAIL) zlog_debug("%s: addr=%pPAs(%s), URIB has shortest distance", __func__, &addr, pim->vrf->name); memcpy(nexthop_tab, urib_tab, sizeof(struct pim_zlookup_nexthop) * tab_size); return urib_num; - } else if (pim->rpf_mode == MCAST_MIX_PFXLEN && - mrib_tab[0].prefix_len < urib_tab[0].prefix_len) { + } else if (mode == MCAST_MIX_PFXLEN && mrib_tab[0].prefix_len < urib_tab[0].prefix_len) { if (PIM_DEBUG_PIM_NHT_DETAIL) zlog_debug("%s: addr=%pPAs(%s), URIB has lengthest prefix length", __func__, &addr, pim->vrf->name); @@ -459,15 +462,13 @@ void zclient_lookup_read_pipe(struct event *thread) return; } - zclient_lookup_nexthop_once(pim, nexthop_tab, 10, l); + zclient_lookup_nexthop_once(pim, nexthop_tab, 10, l, PIMADDR_ANY); event_add_timer(router->master, zclient_lookup_read_pipe, zlookup, 60, &zlookup_read); } -int zclient_lookup_nexthop(struct pim_instance *pim, - struct pim_zlookup_nexthop nexthop_tab[], - const int tab_size, pim_addr addr, - int max_lookup) +int zclient_lookup_nexthop(struct pim_instance *pim, struct pim_zlookup_nexthop nexthop_tab[], + const int tab_size, pim_addr addr, pim_addr group, int max_lookup) { int lookup; uint32_t route_metric = 0xFFFFFFFF; @@ -480,8 +481,7 @@ int zclient_lookup_nexthop(struct pim_instance *pim, int first_ifindex; pim_addr nexthop_addr; - num_ifindex = zclient_lookup_nexthop_once(pim, nexthop_tab, - tab_size, addr); + num_ifindex = zclient_lookup_nexthop_once(pim, nexthop_tab, tab_size, addr, group); if (num_ifindex < 1) { if (PIM_DEBUG_PIM_NHT_DETAIL) zlog_debug( diff --git a/pimd/pim_zlookup.h b/pimd/pim_zlookup.h index c9461eb7e3..720cc4fca6 100644 --- a/pimd/pim_zlookup.h +++ b/pimd/pim_zlookup.h @@ -27,10 +27,8 @@ struct pim_zlookup_nexthop { void zclient_lookup_new(void); void zclient_lookup_free(void); -int zclient_lookup_nexthop(struct pim_instance *pim, - struct pim_zlookup_nexthop nexthop_tab[], - const int tab_size, pim_addr addr, - int max_lookup); +int zclient_lookup_nexthop(struct pim_instance *pim, struct pim_zlookup_nexthop nexthop_tab[], + const int tab_size, pim_addr addr, pim_addr group, int max_lookup); void pim_zlookup_show_ip_multicast(struct vty *vty); diff --git a/redhat/frr.spec.in b/redhat/frr.spec.in index 1ba8f043b2..4e0196d24a 100644 --- a/redhat/frr.spec.in +++ b/redhat/frr.spec.in @@ -31,6 +31,8 @@ %{!?with_watchfrr: %global with_watchfrr 1 } %{!?with_pathd: %global with_pathd 1 } %{!?with_grpc: %global with_grpc 0 } +%{!?with_rpki: %global with_rpki 1 } +%{!?with_docs: %global with_docs 1 } # user and group %{!?frr_user: %global frr_user frr } @@ -189,20 +191,29 @@ BuildRequires: ncurses-devel BuildRequires: readline-devel BuildRequires: texinfo BuildRequires: libyang-devel >= 2.1.128 +# Version requirement don't get reflected down from a BuildRequire +# to Require, so need to require libyang version as both ways +Requires: libyang >= 2.1.128 BuildRequires: pcre2-devel %if 0%{?rhel} && 0%{?rhel} < 7 #python27-devel is available from ius community repo for RedHat/CentOS 6 BuildRequires: python27-devel +%if %{with_docs} BuildRequires: python27-sphinx +%endif %else %if %{use_python2} BuildRequires: python-devel >= 2.7 +%if %{with_docs} BuildRequires: python-sphinx +%endif %else BuildRequires: python3-devel +%if %{with_docs} BuildRequires: 
python3-sphinx %endif %endif +%endif %if %{with_grpc} BuildRequires: grpc-devel >= 1.16.1 BuildRequires: protobuf-devel >= 3.6.1 @@ -283,6 +294,7 @@ The frr-devel package contains the header and object files necessary for developing OSPF-API and frr applications. +%if %{with_rpki} %package rpki-rtrlib Summary: BGP RPKI support (rtrlib) Group: System Environment/Daemons @@ -295,6 +307,7 @@ against cryptographic information stored in WHOIS databases. This is used to prevent hijacking of networks on the wider internet. It is only relevant to internet service providers using their own autonomous system number. +%endif %package snmp @@ -433,7 +446,9 @@ Adds GRPC support to the individual FRR daemons. --disable-bgp-vnc \ %endif --enable-isisd \ +%if %{with_rpki} --enable-rpki \ +%endif %if %{with_bfdd} --enable-bfdd \ %else @@ -465,9 +480,11 @@ sed -e '1c #!/usr/bin/python3' -i %{zeb_src}/tools/frr-reload.py sed -e '1c #!/usr/bin/python3' -i %{zeb_src}/tools/generate_support_bundle.py %endif +%if %{with_docs} pushd doc make info popd +%endif %install @@ -605,7 +622,9 @@ zebra_spec_add_service fabricd 2618/tcp "Fabricd vty" %__sed -i 's|watchfrr_enable=no|watchfrr_enable=yes|g' %{configdir}/daemons 2> /dev/null || true %endif +%if %{with_docs} /sbin/install-info %{_infodir}/frr.info.gz %{_infodir}/dir +%endif # Create dummy config file if they don't exist so basic functions can be used. if [ ! -e %{configdir}/frr.conf ] && [ ! -e %{configdir}/zebra.conf ]; then @@ -673,7 +692,9 @@ fi /sbin/chkconfig --del frr fi %endif +%if %{with_docs} /sbin/install-info --delete %{_infodir}/frr.info.gz %{_infodir}/dir +%endif %files @@ -690,8 +711,10 @@ fi %dir %attr(755,root,root) %{_localstatedir}/log/frr %dir %attr(750,root,root) %{_runstatedir}/frr %endif -%{_infodir}/frr.info.gz -%{_mandir}/man*/* +%if %{with_docs} + %{_infodir}/frr.info.gz + %{_mandir}/man*/* +%endif %{_sbindir}/zebra %{_sbindir}/staticd %{_sbindir}/ospfd @@ -739,19 +762,9 @@ fi %endif %if %{with_pathd} %{_sbindir}/pathd - %{_libdir}/frr/modules/pathd_pcep.so %endif -%{_libdir}/libfrr.so* -%{_libdir}/libfrrcares* -%{_libdir}/libfrrospf* -%if %{with_fpm} - %{_libdir}/frr/modules/zebra_fpm.so -%endif -%{_libdir}/frr/modules/zebra_cumulus_mlag.so -%{_libdir}/frr/modules/dplane_fpm_nl.so -%{_libdir}/frr/modules/bgpd_bmp.so -%{_libdir}/libfrr_pb.so* -%{_libdir}/libfrrfpm_pb.so* +%{_libdir}/libfrr*.so* +%{_libdir}/frr/modules/*.so %{_libdir}/libmgmt_be_nb.so* %{_bindir}/* %config(noreplace) %{configdir}/[!v]*.conf* @@ -791,6 +804,7 @@ fi %endif +%if %{with_rpki} %post rpki-rtrlib # add rpki module to daemons sed -i -e 's/^\(bgpd_options=\)\(.*\)\(".*\)/\1\2 -M rpki\3/' %{_sysconfdir}/frr/daemons @@ -798,22 +812,9 @@ sed -i -e 's/^\(bgpd_options=\)\(.*\)\(".*\)/\1\2 -M rpki\3/' %{_sysconfdir}/frr %postun rpki-rtrlib # remove rpki module from daemons sed -i 's/ -M rpki//' %{_sysconfdir}/frr/daemons - -%files rpki-rtrlib -%{_libdir}/frr/modules/bgpd_rpki.so - - -%files snmp -%{_libdir}/libfrrsnmp.so* -%{_libdir}/frr/modules/*snmp.so - - -%if %{with_grpc} -%files grpc -%{_libdir}/libfrrgrpc_pb.* -%{_libdir}/frr/modules/grpc.so %endif + %files devel %{_libdir}/lib*.so %dir %{_includedir}/%{name} diff --git a/ripd/rip_main.c b/ripd/rip_main.c index 67469f5fe5..cfe4a7e437 100644 --- a/ripd/rip_main.c +++ b/ripd/rip_main.c @@ -127,6 +127,7 @@ static struct frr_signal_t ripd_signals[] = { }; static const struct frr_yang_module_info *const ripd_yang_modules[] = { + &frr_backend_info, &frr_filter_info, &frr_interface_info, &frr_ripd_info, diff --git 
a/ripngd/ripng_main.c b/ripngd/ripng_main.c index ada9ad4e78..b3584b9c3a 100644 --- a/ripngd/ripng_main.c +++ b/ripngd/ripng_main.c @@ -120,6 +120,7 @@ struct frr_signal_t ripng_signals[] = { }; static const struct frr_yang_module_info *const ripngd_yang_modules[] = { + &frr_backend_info, &frr_filter_info, &frr_interface_info, &frr_ripngd_info, diff --git a/staticd/static_debug.c b/staticd/static_debug.c index 618ba91d12..b308860424 100644 --- a/staticd/static_debug.c +++ b/staticd/static_debug.c @@ -22,6 +22,7 @@ struct debug static_dbg_events = {0, "debug static events", "Staticd events"}; struct debug static_dbg_route = {0, "debug static route", "Staticd route"}; struct debug static_dbg_bfd = {0, "debug static bfd", "Staticd bfd"}; +struct debug static_dbg_srv6 = {0, "debug static srv6", "Staticd srv6"}; /* clang-format on */ /* @@ -37,8 +38,7 @@ struct debug static_dbg_bfd = {0, "debug static bfd", "Staticd bfd"}; * Debug general internal events * */ -void static_debug_set(int vtynode, bool onoff, bool events, bool route, - bool bfd) +void static_debug_set(int vtynode, bool onoff, bool events, bool route, bool bfd, bool srv6) { uint32_t mode = DEBUG_NODE2MODE(vtynode); @@ -50,6 +50,8 @@ void static_debug_set(int vtynode, bool onoff, bool events, bool route, DEBUG_MODE_SET(&static_dbg_bfd, mode, onoff); bfd_protocol_integration_set_debug(onoff); } + if (srv6) + DEBUG_MODE_SET(&static_dbg_srv6, mode, onoff); } /* @@ -61,4 +63,5 @@ void static_debug_init(void) debug_install(&static_dbg_events); debug_install(&static_dbg_route); debug_install(&static_dbg_bfd); + debug_install(&static_dbg_srv6); } diff --git a/staticd/static_debug.h b/staticd/static_debug.h index b990f7bcc9..a16e398eba 100644 --- a/staticd/static_debug.h +++ b/staticd/static_debug.h @@ -20,6 +20,7 @@ extern "C" { extern struct debug static_dbg_events; extern struct debug static_dbg_route; extern struct debug static_dbg_bfd; +extern struct debug static_dbg_srv6; /* * Initialize staticd debugging. 
@@ -41,8 +42,7 @@ void static_debug_init(void); * Debug general internal events * */ -void static_debug_set(int vtynode, bool onoff, bool events, bool route, - bool bfd); +void static_debug_set(int vtynode, bool onoff, bool events, bool route, bool bfd, bool srv6); #ifdef __cplusplus } diff --git a/staticd/static_main.c b/staticd/static_main.c index 9468a98b83..3b59ca6a75 100644 --- a/staticd/static_main.c +++ b/staticd/static_main.c @@ -26,6 +26,7 @@ #include "static_zebra.h" #include "static_debug.h" #include "static_nb.h" +#include "static_srv6.h" #include "mgmt_be_client.h" @@ -76,6 +77,10 @@ static void sigint(void) static_vrf_terminate(); static_zebra_stop(); + + /* clean up SRv6 data structures */ + static_srv6_cleanup(); + frr_fini(); exit(0); @@ -107,6 +112,7 @@ struct frr_signal_t static_signals[] = { }; static const struct frr_yang_module_info *const staticd_yang_modules[] = { + &frr_backend_info, &frr_interface_info, &frr_vrf_info, &frr_routing_info, @@ -160,6 +166,9 @@ int main(int argc, char **argv, char **envp) static_debug_init(); static_vrf_init(); + /* initialize SRv6 data structures */ + static_srv6_init(); + static_zebra_init(); static_vty_init(); diff --git a/staticd/static_nb.c b/staticd/static_nb.c index e6aa71a77b..ef363bfe7e 100644 --- a/staticd/static_nb.c +++ b/staticd/static_nb.c @@ -135,93 +135,32 @@ const struct frr_yang_module_info frr_staticd_info = { } }, { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list", + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing/srv6/static-sids/sid", .cbs = { - .create = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_create, - .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_destroy, + .apply_finish = routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_apply_finish, + .create = routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_create, + .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_destroy, } }, { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list", + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing/srv6/static-sids/sid/behavior", .cbs = { - .create = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_create, - .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_destroy, + .modify = routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_behavior_modify, + .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_behavior_destroy, } }, { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/tag", + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing/srv6/static-sids/sid/vrf-name", .cbs = { - .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_tag_modify, + .modify = 
routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_vrf_name_modify, + .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_vrf_name_destroy, } }, { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop", + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing/srv6/static-sids/sid/locator-name", .cbs = { - .apply_finish = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_apply_finish, - .create = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_create, - .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_destroy, - .pre_validate = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_pre_validate, - } - }, - { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/bh-type", - .cbs = { - .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_bh_type_modify, - } - }, - { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/onlink", - .cbs = { - .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_modify, - } - }, - { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/srte-color", - .cbs = { - .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_color_modify, - .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_color_destroy, - } - }, - { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/srv6-segs-stack/entry", - .cbs = { - .create = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_create, - .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_destroy, - } - }, - { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/srv6-segs-stack/entry/seg", - .cbs = { - .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_seg_modify, - .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_seg_destroy, - } - }, - { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/mpls-label-stack/entry", - .cbs = { - .create = 
routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_create, - .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_destroy, - } - }, - { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/mpls-label-stack/entry/label", - .cbs = { - .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_label_modify, - .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_label_destroy, - } - }, - { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/mpls-label-stack/entry/ttl", - .cbs = { - .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_modify, - .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_destroy, - } - }, - { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/mpls-label-stack/entry/traffic-class", - .cbs = { - .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_modify, - .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_destroy, + .modify = routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_locator_name_modify, + .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_locator_name_destroy, } }, { diff --git a/staticd/static_nb.h b/staticd/static_nb.h index be75d9d38c..aa11f34021 100644 --- a/staticd/static_nb.h +++ b/staticd/static_nb.h @@ -72,58 +72,40 @@ int route_next_hop_bfd_source_destroy(struct nb_cb_destroy_args *args); int route_next_hop_bfd_profile_modify(struct nb_cb_modify_args *args); int route_next_hop_bfd_profile_destroy(struct nb_cb_destroy_args *args); int route_next_hop_bfd_multi_hop_modify(struct nb_cb_modify_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_create( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_create( struct nb_cb_create_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_destroy( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_destroy( struct nb_cb_destroy_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_create( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_create( struct nb_cb_create_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_destroy( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_destroy( struct 
nb_cb_destroy_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_tag_modify( - struct nb_cb_modify_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_create( - struct nb_cb_create_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_destroy( - struct nb_cb_destroy_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_bh_type_modify( - struct nb_cb_modify_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_modify( - struct nb_cb_modify_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_color_modify( - struct nb_cb_modify_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_color_destroy( - struct nb_cb_destroy_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_create( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_create( struct nb_cb_create_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_destroy( - struct nb_cb_destroy_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_seg_modify( - struct nb_cb_modify_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_seg_destroy( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_destroy( struct nb_cb_destroy_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_create( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_create( struct nb_cb_create_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_destroy( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_destroy( struct nb_cb_destroy_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_label_modify( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_behavior_modify( struct nb_cb_modify_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_label_destroy( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_behavior_destroy( struct nb_cb_destroy_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_modify( +int 
routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_vrf_name_modify( struct nb_cb_modify_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_destroy( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_vrf_name_destroy( struct nb_cb_destroy_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_modify( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_locator_name_modify( struct nb_cb_modify_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_destroy( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_locator_name_destroy( struct nb_cb_destroy_args *args); /* Optional 'apply_finish' callbacks. */ void routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_apply_finish( struct nb_cb_apply_finish_args *args); -void routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_apply_finish( +void routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_apply_finish( struct nb_cb_apply_finish_args *args); /* Optional 'pre_validate' callbacks. */ @@ -139,16 +121,16 @@ int routing_control_plane_protocols_name_validate( /* xpath macros */ /* route-list */ -#define FRR_STATIC_ROUTE_INFO_KEY_XPATH \ - "/frr-routing:routing/control-plane-protocols/" \ - "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \ - "frr-staticd:staticd/route-list[prefix='%s'][afi-safi='%s']/" \ +#define FRR_STATIC_ROUTE_INFO_KEY_XPATH \ + "/frr-routing:routing/control-plane-protocols/" \ + "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \ + "frr-staticd:staticd/route-list[prefix='%s'][src-prefix='%s'][afi-safi='%s']/" \ "path-list[table-id='%u'][distance='%u']" -#define FRR_STATIC_ROUTE_INFO_KEY_NO_DISTANCE_XPATH \ - "/frr-routing:routing/control-plane-protocols/" \ - "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \ - "frr-staticd:staticd/route-list[prefix='%s'][afi-safi='%s']/" \ +#define FRR_STATIC_ROUTE_INFO_KEY_NO_DISTANCE_XPATH \ + "/frr-routing:routing/control-plane-protocols/" \ + "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \ + "frr-staticd:staticd/route-list[prefix='%s'][src-prefix='%s'][afi-safi='%s']/" \ "path-list[table-id='%u']" @@ -173,19 +155,6 @@ int routing_control_plane_protocols_name_validate( #define FRR_STATIC_ROUTE_NH_SRV6_KEY_SEG_XPATH "/entry[id='%u']/seg" -/* route-list/srclist */ -#define FRR_S_ROUTE_SRC_INFO_KEY_XPATH \ - "/frr-routing:routing/control-plane-protocols/" \ - "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \ - "frr-staticd:staticd/route-list[prefix='%s'][afi-safi='%s']/" \ - "src-list[src-prefix='%s']/path-list[table-id='%u'][distance='%u']" - -#define FRR_S_ROUTE_SRC_INFO_KEY_NO_DISTANCE_XPATH \ - "/frr-routing:routing/control-plane-protocols/" \ - "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \ - "frr-staticd:staticd/route-list[prefix='%s'][afi-safi='%s']/" \ - "src-list[src-prefix='%s']/path-list[table-id='%u']" - /* route-list/frr-nexthops */ 
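/* Editorial sketch (illustration only; every literal value below is
 * hypothetical): with the src-prefix key added to the route-list xpath above,
 * an IPv6 dst-src static route is addressed with both prefixes, roughly:
 *
 *   char xpath[XPATH_MAXLEN];
 *
 *   snprintf(xpath, sizeof(xpath), FRR_STATIC_ROUTE_INFO_KEY_XPATH,
 *            "frr-staticd:staticd", "staticd", "default",
 *            "2001:db8:1::/64", "2001:db8:2::/64",
 *            "frr-routing:ipv6-unicast", 0, 1);
 *
 * The argument order simply follows the format string of the macro.
 */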
#define FRR_DEL_S_ROUTE_NH_KEY_XPATH \ FRR_STATIC_ROUTE_INFO_KEY_XPATH \ @@ -196,15 +165,23 @@ int routing_control_plane_protocols_name_validate( FRR_STATIC_ROUTE_INFO_KEY_NO_DISTANCE_XPATH \ FRR_STATIC_ROUTE_NH_KEY_XPATH -/* route-list/src/src-list/frr-nexthops*/ -#define FRR_DEL_S_ROUTE_SRC_NH_KEY_XPATH \ - FRR_S_ROUTE_SRC_INFO_KEY_XPATH \ - FRR_STATIC_ROUTE_NH_KEY_XPATH +/* srv6 */ +#define FRR_STATIC_SRV6_INFO_KEY_XPATH \ + "/frr-routing:routing/control-plane-protocols/" \ + "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \ + "frr-staticd:staticd/segment-routing/srv6" -/* route-list/src/src-list/frr-nexthops*/ -#define FRR_DEL_S_ROUTE_SRC_NH_KEY_NO_DISTANCE_XPATH \ - FRR_S_ROUTE_SRC_INFO_KEY_NO_DISTANCE_XPATH \ - FRR_STATIC_ROUTE_NH_KEY_XPATH +/* srv6/static-sids */ +#define FRR_STATIC_SRV6_SID_KEY_XPATH \ + FRR_STATIC_SRV6_INFO_KEY_XPATH \ + "/static-sids/" \ + "sid[sid='%s']" + +#define FRR_STATIC_SRV6_SID_BEHAVIOR_XPATH "/behavior" + +#define FRR_STATIC_SRV6_SID_VRF_NAME_XPATH "/vrf-name" + +#define FRR_STATIC_SRV6_SID_LOCATOR_NAME_XPATH "/locator-name" #ifdef __cplusplus } diff --git a/staticd/static_nb_config.c b/staticd/static_nb_config.c index 7de5f0474a..e2ab1f2ffe 100644 --- a/staticd/static_nb_config.c +++ b/staticd/static_nb_config.c @@ -20,6 +20,9 @@ #include "static_nb.h" #include "static_zebra.h" +#include "static_srv6.h" +#include "static_debug.h" + static int static_path_list_create(struct nb_cb_create_args *args) { @@ -499,16 +502,6 @@ void routing_control_plane_protocols_control_plane_protocol_staticd_route_list_p static_install_nexthop(nh); } -void routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_apply_finish( - struct nb_cb_apply_finish_args *args) -{ - struct static_nexthop *nh; - - nh = nb_running_get_entry(args->dnode, NULL, true); - - static_install_nexthop(nh); -} - int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_pre_validate( struct nb_cb_pre_validate_args *args) { @@ -573,7 +566,7 @@ int routing_control_plane_protocols_staticd_destroy( if (!stable) continue; - for (rn = route_top(stable); rn; rn = route_next(rn)) + for (rn = route_top(stable); rn; rn = srcdest_route_next(rn)) static_del_route(rn); } @@ -592,7 +585,7 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_cr struct static_vrf *svrf; struct route_node *rn; const struct lyd_node *vrf_dnode; - struct prefix prefix; + struct prefix prefix, src_prefix, *src_p; const char *afi_safi; afi_t prefix_afi; afi_t afi; @@ -601,6 +594,8 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_cr switch (args->event) { case NB_EV_VALIDATE: yang_dnode_get_prefix(&prefix, args->dnode, "prefix"); + yang_dnode_get_prefix(&src_prefix, args->dnode, "src-prefix"); + src_p = src_prefix.prefixlen ? 
&src_prefix : NULL; afi_safi = yang_dnode_get_string(args->dnode, "afi-safi"); yang_afi_safi_identity2value(afi_safi, &afi, &safi); prefix_afi = family2afi(prefix.family); @@ -611,6 +606,14 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_cr yang_dnode_get_string(args->dnode, "prefix")); return NB_ERR_VALIDATION; } + + if (src_p && afi != AFI_IP6) { + flog_warn(EC_LIB_NB_CB_CONFIG_VALIDATE, + "invalid use of IPv6 dst-src prefix %s on %s", + yang_dnode_get_string(args->dnode, "src-prefix"), + yang_dnode_get_string(args->dnode, "prefix")); + return NB_ERR_VALIDATION; + } break; case NB_EV_PREPARE: case NB_EV_ABORT: @@ -621,10 +624,12 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_cr svrf = nb_running_get_entry(vrf_dnode, NULL, true); yang_dnode_get_prefix(&prefix, args->dnode, "prefix"); + yang_dnode_get_prefix(&src_prefix, args->dnode, "src-prefix"); + src_p = src_prefix.prefixlen ? &src_prefix : NULL; afi_safi = yang_dnode_get_string(args->dnode, "afi-safi"); yang_afi_safi_identity2value(afi_safi, &afi, &safi); - rn = static_add_route(afi, safi, &prefix, NULL, svrf); + rn = static_add_route(afi, safi, &prefix, (struct prefix_ipv6 *)src_p, svrf); if (!svrf->vrf || svrf->vrf->vrf_id == VRF_UNKNOWN) snprintf( args->errmsg, args->errmsg_len, @@ -1045,325 +1050,219 @@ int route_next_hop_bfd_profile_destroy(struct nb_cb_destroy_args *args) /* * XPath: - * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing */ -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_create( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_create( struct nb_cb_create_args *args) { - struct static_vrf *s_vrf; - struct route_node *rn; - struct route_node *src_rn; - struct prefix_ipv6 src_prefix = {}; - struct stable_info *info; - afi_t afi; - safi_t safi = SAFI_UNICAST; - - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - break; - case NB_EV_APPLY: - rn = nb_running_get_entry(args->dnode, NULL, true); - info = route_table_get_info(rn->table); - s_vrf = info->svrf; - yang_dnode_get_ipv6p(&src_prefix, args->dnode, "src-prefix"); - afi = family2afi(src_prefix.family); - src_rn = - static_add_route(afi, safi, &rn->p, &src_prefix, s_vrf); - nb_running_set_entry(args->dnode, src_rn); - break; - } return NB_OK; } -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_destroy( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_destroy( struct nb_cb_destroy_args *args) { - struct route_node *src_rn; - - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - break; - case NB_EV_APPLY: - src_rn = nb_running_unset_entry(args->dnode); - static_del_route(src_rn); - break; - } - return NB_OK; } /* * XPath: - * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing/srv6 */ -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_create( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_create( struct nb_cb_create_args *args) { - return 
static_path_list_create(args); + return NB_OK; } -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_destroy( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_destroy( struct nb_cb_destroy_args *args) { - return static_path_list_destroy(args); -} - -/* - * XPath: - * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/tag - */ -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_tag_modify( - struct nb_cb_modify_args *args) -{ - return static_path_list_tag_modify(args); + return NB_OK; } /* * XPath: - * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing/srv6/static-sids */ -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_create( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_create( struct nb_cb_create_args *args) { - return static_nexthop_create(args); + return NB_OK; } -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_destroy( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_destroy( struct nb_cb_destroy_args *args) { - return static_nexthop_destroy(args); + return NB_OK; } /* * XPath: - * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/bh-type + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing/srv6/locators/locator/static-sids/sid */ -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_bh_type_modify( - struct nb_cb_modify_args *args) +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_create( + struct nb_cb_create_args *args) { - return static_nexthop_bh_type_modify(args); -} + struct static_srv6_sid *sid; + struct prefix_ipv6 sid_value; -/* - * XPath: - * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/onlink - */ -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_modify( - struct nb_cb_modify_args *args) -{ - return static_nexthop_onlink_modify(args); -} + if (args->event != NB_EV_APPLY) + return NB_OK; -/* - * XPath: - * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/srte-color - */ -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_color_modify( - struct nb_cb_modify_args *args) -{ - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - break; - case NB_EV_APPLY: - if (static_nexthop_color_modify(args) != NB_OK) - return NB_ERR; + yang_dnode_get_ipv6p(&sid_value, args->dnode, "sid"); + sid = static_srv6_sid_alloc(&sid_value); + nb_running_set_entry(args->dnode, sid); - break; - } return NB_OK; } - -int 
routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_color_destroy( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_destroy( struct nb_cb_destroy_args *args) { - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - break; - case NB_EV_APPLY: - if (static_nexthop_color_destroy(args) != NB_OK) - return NB_ERR; - break; - } - return NB_OK; -} + struct static_srv6_sid *sid; -/* - * XPath: - * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/srv6-segs-stack/entry - */ -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_create( - struct nb_cb_create_args *args) -{ - return nexthop_srv6_segs_stack_entry_create(args); -} + if (args->event != NB_EV_APPLY) + return NB_OK; -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_destroy( - struct nb_cb_destroy_args *args) -{ - return nexthop_srv6_segs_stack_entry_destroy(args); -} + sid = nb_running_unset_entry(args->dnode); + listnode_delete(srv6_sids, sid); + static_srv6_sid_del(sid); -/* - * XPath: - * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/srv6-segs-stack/entry/seg - */ -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_seg_modify( - struct nb_cb_modify_args *args) -{ - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - break; - case NB_EV_APPLY: - if (static_nexthop_srv6_segs_modify(args) != NB_OK) - return NB_ERR; - break; - } return NB_OK; } -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_seg_destroy( - struct nb_cb_destroy_args *args) +void routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_apply_finish( + struct nb_cb_apply_finish_args *args) { - /* - * No operation is required in this call back. - * nexthop_mpls_seg_stack_entry_destroy() will take care - * to reset the seg vaue. 
- */ - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - case NB_EV_APPLY: - break; + struct static_srv6_sid *sid; + struct static_srv6_locator *locator; + + sid = nb_running_get_entry(args->dnode, NULL, true); + + locator = static_srv6_locator_lookup(sid->locator_name); + if (!locator) { + DEBUGD(&static_dbg_srv6, + "%s: Locator %s not found, trying to get locator information from zebra", + __func__, sid->locator_name); + static_zebra_srv6_manager_get_locator(sid->locator_name); + listnode_add(srv6_sids, sid); + return; } - return NB_OK; -} -/* - * XPath: - * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/mpls-label-stack/entry - */ -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_create( - struct nb_cb_create_args *args) -{ - return nexthop_mpls_label_stack_entry_create(args); -} + sid->locator = locator; -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_destroy( - struct nb_cb_destroy_args *args) -{ - return nexthop_mpls_label_stack_entry_destroy(args); + listnode_add(srv6_sids, sid); + static_zebra_request_srv6_sid(sid); } /* * XPath: - * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/mpls-label-stack/entry/label + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing/srv6/locators/locator/static-sids/sid/behavior */ -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_label_modify( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_behavior_modify( struct nb_cb_modify_args *args) { - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - break; - case NB_EV_APPLY: - if (static_nexthop_mpls_label_modify(args) != NB_OK) - return NB_ERR; - break; + struct static_srv6_sid *sid; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + sid = nb_running_get_entry(args->dnode, NULL, true); + + /* Release and uninstall existing SID, if any, before requesting the new one */ + if (CHECK_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_VALID)) { + static_zebra_release_srv6_sid(sid); + UNSET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_VALID); + } + + if (CHECK_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA)) { + static_zebra_srv6_sid_uninstall(sid); + UNSET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA); } + + sid->behavior = yang_dnode_get_enum(args->dnode, "../behavior"); + return NB_OK; } -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_label_destroy( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_behavior_destroy( struct nb_cb_destroy_args *args) { - /* - * No operation is required in this call back. - * nexthop_mpls_label_stack_entry_destroy() will take care - * to reset the label vaue. 
- */ - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - case NB_EV_APPLY: - break; - } return NB_OK; } /* * XPath: - * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/mpls-label-stack/entry/ttl + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing/srv6/locators/locator/static-sids/sid/vrf-name */ -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_modify( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_vrf_name_modify( struct nb_cb_modify_args *args) { - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - case NB_EV_APPLY: - break; + struct static_srv6_sid *sid; + const char *vrf_name; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + sid = nb_running_get_entry(args->dnode, NULL, true); + + /* Release and uninstall existing SID, if any, before requesting the new one */ + if (CHECK_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_VALID)) { + static_zebra_release_srv6_sid(sid); + UNSET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_VALID); + } + + if (CHECK_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA)) { + static_zebra_srv6_sid_uninstall(sid); + UNSET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA); } + vrf_name = yang_dnode_get_string(args->dnode, "../vrf-name"); + snprintf(sid->attributes.vrf_name, sizeof(sid->attributes.vrf_name), "%s", vrf_name); + return NB_OK; } -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_destroy( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_vrf_name_destroy( struct nb_cb_destroy_args *args) { - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - case NB_EV_APPLY: - break; - } - return NB_OK; } /* * XPath: - * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/mpls-label-stack/entry/traffic-class + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing/srv6/locators/locator/static-sids/sid/vrf-name */ -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_modify( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_locator_name_modify( struct nb_cb_modify_args *args) { - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - case NB_EV_APPLY: - break; + struct static_srv6_sid *sid; + const char *loc_name; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + sid = nb_running_get_entry(args->dnode, NULL, true); + + /* Release and uninstall existing SID, if any, before requesting the new one */ + if (CHECK_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_VALID)) { + static_zebra_release_srv6_sid(sid); + UNSET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_VALID); + } + + if (CHECK_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA)) { + static_zebra_srv6_sid_uninstall(sid); + UNSET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA); } + loc_name = yang_dnode_get_string(args->dnode, "../locator-name"); + 
snprintf(sid->locator_name, sizeof(sid->locator_name), "%s", loc_name); + return NB_OK; } -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_destroy( +int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_locator_name_destroy( struct nb_cb_destroy_args *args) { - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - case NB_EV_APPLY: - break; - } - return NB_OK; } diff --git a/staticd/static_nht.c b/staticd/static_nht.c index 06d27c6f59..367ee85040 100644 --- a/staticd/static_nht.c +++ b/staticd/static_nht.c @@ -49,8 +49,8 @@ static void static_nht_update_path(struct static_path *pn, struct prefix *nhp, static_zebra_route_add(pn, true); } -static void static_nht_update_safi(struct prefix *sp, struct prefix *nhp, - uint32_t nh_num, afi_t afi, safi_t safi, +static void static_nht_update_safi(const struct prefix *sp, const struct prefix *ssrc_p, + struct prefix *nhp, uint32_t nh_num, afi_t afi, safi_t safi, struct static_vrf *svrf, vrf_id_t nh_vrf_id) { struct route_table *stable; @@ -63,7 +63,7 @@ static void static_nht_update_safi(struct prefix *sp, struct prefix *nhp, return; if (sp) { - rn = srcdest_rnode_lookup(stable, sp, NULL); + rn = srcdest_rnode_lookup(stable, sp, (const struct prefix_ipv6 *)ssrc_p); if (rn && rn->info) { si = static_route_info_from_rnode(rn); frr_each(static_path_list, &si->path_list, pn) { @@ -75,7 +75,7 @@ static void static_nht_update_safi(struct prefix *sp, struct prefix *nhp, return; } - for (rn = route_top(stable); rn; rn = route_next(rn)) { + for (rn = route_top(stable); rn; rn = srcdest_route_next(rn)) { si = static_route_info_from_rnode(rn); if (!si) continue; @@ -85,14 +85,13 @@ static void static_nht_update_safi(struct prefix *sp, struct prefix *nhp, } } -void static_nht_update(struct prefix *sp, struct prefix *nhp, uint32_t nh_num, - afi_t afi, safi_t safi, vrf_id_t nh_vrf_id) +void static_nht_update(const struct prefix *sp, const struct prefix *ssrc_p, struct prefix *nhp, + uint32_t nh_num, afi_t afi, safi_t safi, vrf_id_t nh_vrf_id) { struct static_vrf *svrf; RB_FOREACH (svrf, svrf_name_head, &svrfs) - static_nht_update_safi(sp, nhp, nh_num, afi, safi, svrf, - nh_vrf_id); + static_nht_update_safi(sp, ssrc_p, nhp, nh_num, afi, safi, svrf, nh_vrf_id); } static void static_nht_reset_start_safi(struct prefix *nhp, afi_t afi, @@ -109,7 +108,7 @@ static void static_nht_reset_start_safi(struct prefix *nhp, afi_t afi, if (!stable) return; - for (rn = route_top(stable); rn; rn = route_next(rn)) { + for (rn = route_top(stable); rn; rn = srcdest_route_next(rn)) { si = static_route_info_from_rnode(rn); if (!si) continue; @@ -150,8 +149,8 @@ void static_nht_reset_start(struct prefix *nhp, afi_t afi, safi_t safi, static_nht_reset_start_safi(nhp, afi, safi, svrf, nh_vrf_id); } -static void static_nht_mark_state_safi(struct prefix *sp, afi_t afi, - safi_t safi, struct vrf *vrf, +static void static_nht_mark_state_safi(const struct prefix *sp, const struct prefix *ssrc_p, + afi_t afi, safi_t safi, struct vrf *vrf, enum static_install_states state) { struct static_vrf *svrf; @@ -169,7 +168,7 @@ static void static_nht_mark_state_safi(struct prefix *sp, afi_t afi, if (!stable) return; - rn = srcdest_rnode_lookup(stable, sp, NULL); + rn = srcdest_rnode_lookup(stable, sp, (const struct prefix_ipv6 *)ssrc_p); if (!rn) return; si = rn->info; @@ -184,8 +183,8 @@ static void 
static_nht_mark_state_safi(struct prefix *sp, afi_t afi, route_unlock_node(rn); } -void static_nht_mark_state(struct prefix *sp, safi_t safi, vrf_id_t vrf_id, - enum static_install_states state) +void static_nht_mark_state(const struct prefix *sp, const struct prefix *ssrc_p, safi_t safi, + vrf_id_t vrf_id, enum static_install_states state) { struct vrf *vrf; @@ -198,5 +197,5 @@ void static_nht_mark_state(struct prefix *sp, safi_t safi, vrf_id_t vrf_id, if (!vrf || !vrf->info) return; - static_nht_mark_state_safi(sp, afi, safi, vrf, state); + static_nht_mark_state_safi(sp, ssrc_p, afi, safi, vrf, state); } diff --git a/staticd/static_nht.h b/staticd/static_nht.h index 74f4401e49..41ff30cd52 100644 --- a/staticd/static_nht.h +++ b/staticd/static_nht.h @@ -16,15 +16,14 @@ extern "C" { * us call this function to find the nexthop we are tracking so it * can be installed or removed. * - * sp -> The route we are looking at. If NULL then look at all - * routes. + * sp + ssrc_p -> The route we are looking at. If NULL then look at all routes. * nhp -> The nexthop that is being tracked. * nh_num -> number of valid nexthops. * afi -> The afi we are working in. * vrf_id -> The vrf the nexthop is in. */ -extern void static_nht_update(struct prefix *sp, struct prefix *nhp, - uint32_t nh_num, afi_t afi, safi_t safi, +extern void static_nht_update(const struct prefix *sp, const struct prefix *ssrc_p, + struct prefix *nhp, uint32_t nh_num, afi_t afi, safi_t safi, vrf_id_t vrf_id); /* @@ -35,11 +34,10 @@ extern void static_nht_reset_start(struct prefix *nhp, afi_t afi, safi_t safi, vrf_id_t nh_vrf_id); /* - * For the given prefix, sp, mark it as in a particular state + * For the given prefix, sp + ssrc_p, mark it as in a particular state */ -extern void static_nht_mark_state(struct prefix *sp, safi_t safi, - vrf_id_t vrf_id, - enum static_install_states state); +extern void static_nht_mark_state(const struct prefix *sp, const struct prefix *ssrc_p, safi_t safi, + vrf_id_t vrf_id, enum static_install_states state); /* * For the given nexthop, returns the string diff --git a/staticd/static_routes.c b/staticd/static_routes.c index cba38183bb..cbe1c3c8c0 100644 --- a/staticd/static_routes.c +++ b/staticd/static_routes.c @@ -33,10 +33,6 @@ void zebra_stable_node_cleanup(struct route_table *table, struct static_nexthop *nh; struct static_path *pn; struct static_route_info *si; - struct route_table *src_table; - struct route_node *src_node; - struct static_path *src_pn; - struct static_route_info *src_si; si = node->info; @@ -50,36 +46,6 @@ void zebra_stable_node_cleanup(struct route_table *table, static_path_list_del(&si->path_list, pn); XFREE(MTYPE_STATIC_PATH, pn); } - - /* clean up for dst table */ - src_table = srcdest_srcnode_table(node); - if (src_table) { - /* This means the route_node is part of the top - * hierarchy and refers to a destination prefix. 
- */ - for (src_node = route_top(src_table); src_node; - src_node = route_next(src_node)) { - src_si = src_node->info; - - frr_each_safe(static_path_list, - &src_si->path_list, src_pn) { - frr_each_safe(static_nexthop_list, - &src_pn->nexthop_list, - nh) { - static_nexthop_list_del( - &src_pn->nexthop_list, - nh); - XFREE(MTYPE_STATIC_NEXTHOP, nh); - } - static_path_list_del(&src_si->path_list, - src_pn); - XFREE(MTYPE_STATIC_PATH, src_pn); - } - - XFREE(MTYPE_STATIC_ROUTE, src_node->info); - } - } - XFREE(MTYPE_STATIC_ROUTE, node->info); } } @@ -124,28 +90,10 @@ struct route_node *static_add_route(afi_t afi, safi_t safi, struct prefix *p, return rn; } -/* To delete the srcnodes */ -static void static_del_src_route(struct route_node *rn) -{ - struct static_path *pn; - struct static_route_info *si; - - si = rn->info; - - frr_each_safe(static_path_list, &si->path_list, pn) { - static_del_path(pn); - } - - XFREE(MTYPE_STATIC_ROUTE, rn->info); - route_unlock_node(rn); -} - void static_del_route(struct route_node *rn) { struct static_path *pn; struct static_route_info *si; - struct route_table *src_table; - struct route_node *src_node; si = rn->info; @@ -153,17 +101,6 @@ void static_del_route(struct route_node *rn) static_del_path(pn); } - /* clean up for dst table */ - src_table = srcdest_srcnode_table(rn); - if (src_table) { - /* This means the route_node is part of the top hierarchy - * and refers to a destination prefix. - */ - for (src_node = route_top(src_table); src_node; - src_node = route_next(src_node)) { - static_del_src_route(src_node); - } - } XFREE(MTYPE_STATIC_ROUTE, rn->info); route_unlock_node(rn); } @@ -477,7 +414,7 @@ static void static_fixup_vrf(struct vrf *vrf, struct route_table *stable, struct static_path *pn; struct static_route_info *si; - for (rn = route_top(stable); rn; rn = route_next(rn)) { + for (rn = route_top(stable); rn; rn = srcdest_route_next(rn)) { si = static_route_info_from_rnode(rn); if (!si) continue; @@ -517,7 +454,7 @@ static void static_enable_vrf(struct route_table *stable, afi_t afi, safi_t safi struct static_path *pn; struct static_route_info *si; - for (rn = route_top(stable); rn; rn = route_next(rn)) { + for (rn = route_top(stable); rn; rn = srcdest_route_next(rn)) { si = static_route_info_from_rnode(rn); if (!si) continue; @@ -575,7 +512,7 @@ static void static_cleanup_vrf(struct vrf *vrf, struct route_table *stable, struct static_path *pn; struct static_route_info *si; - for (rn = route_top(stable); rn; rn = route_next(rn)) { + for (rn = route_top(stable); rn; rn = srcdest_route_next(rn)) { si = static_route_info_from_rnode(rn); if (!si) continue; @@ -608,7 +545,7 @@ static void static_disable_vrf(struct route_table *stable, struct static_path *pn; struct static_route_info *si; - for (rn = route_top(stable); rn; rn = route_next(rn)) { + for (rn = route_top(stable); rn; rn = srcdest_route_next(rn)) { si = static_route_info_from_rnode(rn); if (!si) continue; diff --git a/staticd/static_srv6.c b/staticd/static_srv6.c new file mode 100644 index 0000000000..032bb9de9f --- /dev/null +++ b/staticd/static_srv6.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * STATICd - Segment Routing over IPv6 (SRv6) code + * Copyright (C) 2025 Alibaba Inc. + * Yuqing Zhao + * Lingyu Zhang + */ +#include <zebra.h> + +#include "vrf.h" +#include "nexthop.h" + +#include "static_routes.h" +#include "static_srv6.h" +#include "static_vrf.h" +#include "static_zebra.h" +#include "static_debug.h" + +/* + * List of SRv6 SIDs. 
+ */ +struct list *srv6_locators; +struct list *srv6_sids; + +DEFINE_MTYPE_STATIC(STATIC, STATIC_SRV6_LOCATOR, "Static SRv6 locator"); +DEFINE_MTYPE_STATIC(STATIC, STATIC_SRV6_SID, "Static SRv6 SID"); + +/* + * When an interface is enabled in the kernel, go through all the static SRv6 SIDs in + * the system that use this interface and install/remove them in the zebra RIB. + * + * ifp - The interface being enabled + * is_up - Whether the interface is up or down + */ +void static_ifp_srv6_sids_update(struct interface *ifp, bool is_up) +{ + struct static_srv6_sid *sid; + struct listnode *node; + + if (!srv6_sids || !ifp) + return; + + DEBUGD(&static_dbg_srv6, "%s: Interface %s %s. %s SIDs that depend on the interface", + __func__, ifp->name, (is_up) ? "up" : "down", + (is_up) ? "Installing" : "Removing"); + + /* + * Iterate over the list of SRv6 SIDs and install or remove from the zebra + * RIB the SIDs that depend on this interface + */ + for (ALL_LIST_ELEMENTS_RO(srv6_sids, node, sid)) { + if ((strcmp(sid->attributes.vrf_name, ifp->name) == 0) || + (strncmp(ifp->name, DEFAULT_SRV6_IFNAME, sizeof(ifp->name)) == 0 && + (sid->behavior == SRV6_ENDPOINT_BEHAVIOR_END || + sid->behavior == SRV6_ENDPOINT_BEHAVIOR_END_NEXT_CSID))) { + if (is_up) { + static_zebra_srv6_sid_install(sid); + SET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA); + } else { + static_zebra_srv6_sid_uninstall(sid); + UNSET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA); + } + } + } +} + +/* + * Allocate an SRv6 SID object and initialize the fields common to all the + * behaviors (i.e., SID address and behavior). + */ +struct static_srv6_sid *static_srv6_sid_alloc(struct prefix_ipv6 *addr) +{ + struct static_srv6_sid *sid = NULL; + + sid = XCALLOC(MTYPE_STATIC_SRV6_SID, sizeof(struct static_srv6_sid)); + sid->addr = *addr; + + return sid; +} + +void static_srv6_sid_free(struct static_srv6_sid *sid) +{ + XFREE(MTYPE_STATIC_SRV6_SID, sid); +} + +struct static_srv6_locator *static_srv6_locator_lookup(const char *name) +{ + struct static_srv6_locator *locator; + struct listnode *node; + + for (ALL_LIST_ELEMENTS_RO(srv6_locators, node, locator)) + if (!strncmp(name, locator->name, SRV6_LOCNAME_SIZE)) + return locator; + return NULL; +} + +/* + * Look up an SRv6 SID in the list of SRv6 SIDs. + */ +struct static_srv6_sid *static_srv6_sid_lookup(struct prefix_ipv6 *sid_addr) +{ + struct static_srv6_sid *sid; + struct listnode *node; + + for (ALL_LIST_ELEMENTS_RO(srv6_sids, node, sid)) + if (memcmp(&sid->addr, sid_addr, sizeof(struct prefix_ipv6)) == 0) + return sid; + + return NULL; +} + +struct static_srv6_locator *static_srv6_locator_alloc(const char *name) +{ + struct static_srv6_locator *locator = NULL; + + locator = XCALLOC(MTYPE_STATIC_SRV6_LOCATOR, sizeof(struct static_srv6_locator)); + strlcpy(locator->name, name, sizeof(locator->name)); + + return locator; +} + +void static_srv6_locator_free(struct static_srv6_locator *locator) +{ + XFREE(MTYPE_STATIC_SRV6_LOCATOR, locator); +} + +void delete_static_srv6_locator(void *val) +{ + static_srv6_locator_free((struct static_srv6_locator *)val); +} + +/* + * Remove an SRv6 SID from the zebra RIB (if it was previously installed) and + * release the memory previously allocated for the SID.
+ */ +void static_srv6_sid_del(struct static_srv6_sid *sid) +{ + if (CHECK_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_VALID)) { + static_zebra_release_srv6_sid(sid); + UNSET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_VALID); + } + + if (CHECK_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA)) { + static_zebra_srv6_sid_uninstall(sid); + UNSET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA); + } + + XFREE(MTYPE_STATIC_SRV6_SID, sid); +} + +void delete_static_srv6_sid(void *val) +{ + static_srv6_sid_free((struct static_srv6_sid *)val); +} + +/* + * Initialize SRv6 data structures. + */ +void static_srv6_init(void) +{ + srv6_locators = list_new(); + srv6_locators->del = delete_static_srv6_locator; + srv6_sids = list_new(); + srv6_sids->del = delete_static_srv6_sid; +} + +/* + * Clean up all the SRv6 data structures. + */ +void static_srv6_cleanup(void) +{ + list_delete(&srv6_locators); + list_delete(&srv6_sids); +} diff --git a/staticd/static_srv6.h b/staticd/static_srv6.h new file mode 100644 index 0000000000..48986092ae --- /dev/null +++ b/staticd/static_srv6.h @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * STATICd - Segment Routing over IPv6 (SRv6) header + * Copyright (C) 2025 Alibaba Inc. + * Yuqing Zhao + * Lingyu Zhang + */ +#ifndef __STATIC_SRV6_H__ +#define __STATIC_SRV6_H__ + +#include "vrf.h" +#include "srv6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Attributes for an SRv6 SID */ +struct static_srv6_sid_attributes { + /* VRF name */ + char vrf_name[VRF_NAMSIZ]; + char ifname[IFNAMSIZ]; + struct in6_addr nh6; +}; + +/* Static SRv6 SID */ +struct static_srv6_sid { + /* SRv6 SID address */ + struct prefix_ipv6 addr; + /* behavior bound to the SRv6 SID */ + enum srv6_endpoint_behavior_codepoint behavior; + /* SID attributes */ + struct static_srv6_sid_attributes attributes; + + /* SRv6 SID flags */ + uint8_t flags; +/* + * this SRv6 SID has been allocated by SID Manager + * and can be installed in the zebra RIB + */ +#define STATIC_FLAG_SRV6_SID_VALID (1 << 0) +/* this SRv6 SID has been installed in the zebra RIB */ +#define STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA (1 << 1) + + char locator_name[SRV6_LOCNAME_SIZE]; + struct static_srv6_locator *locator; +}; + +struct static_srv6_locator { + char name[SRV6_LOCNAME_SIZE]; + struct prefix_ipv6 prefix; + + /* + * Bit length of SRv6 locator described in + * draft-ietf-bess-srv6-services-05#section-3.2.1 + */ + uint8_t block_bits_length; + uint8_t node_bits_length; + uint8_t function_bits_length; + uint8_t argument_bits_length; + + uint8_t flags; +}; + +/* List of SRv6 SIDs. */ +extern struct list *srv6_locators; +extern struct list *srv6_sids; + +/* + * Allocate an SRv6 SID object and initialize its fields, SID address and + * behavior. + */ +extern struct static_srv6_sid *static_srv6_sid_alloc(struct prefix_ipv6 *addr); +extern void static_srv6_sid_free(struct static_srv6_sid *sid); +/* Look up an SRv6 SID in the list of SRv6 SIDs. */ +extern struct static_srv6_sid *static_srv6_sid_lookup(struct prefix_ipv6 *sid_addr); +/* + * Remove an SRv6 SID from the zebra RIB (if it was previously installed) and + * release the memory previously allocated for the SID. + */ +extern void static_srv6_sid_del(struct static_srv6_sid *sid); + +/* Initialize SRv6 data structures. */ +extern void static_srv6_init(void); +/* Clean up all the SRv6 data structures.
*/ +extern void static_srv6_cleanup(void); + +/* + * When an interface is enabled in the kernel, go through all the static SRv6 SIDs in + * the system that use this interface and install/remove them in the zebra RIB. + * + * ifp - The interface being enabled + * is_up - Whether the interface is up or down + */ +void static_ifp_srv6_sids_update(struct interface *ifp, bool is_up); + +struct static_srv6_locator *static_srv6_locator_alloc(const char *name); +void static_srv6_locator_free(struct static_srv6_locator *locator); +struct static_srv6_locator *static_srv6_locator_lookup(const char *name); + +void delete_static_srv6_sid(void *val); +void delete_static_srv6_locator(void *val); + +#ifdef __cplusplus +} +#endif + +#endif /* __STATIC_SRV6_H__ */ diff --git a/staticd/static_vrf.c b/staticd/static_vrf.c index 710827a9ff..78bc30500b 100644 --- a/staticd/static_vrf.c +++ b/staticd/static_vrf.c @@ -51,10 +51,8 @@ struct static_vrf *static_vrf_alloc(const char *name) for (afi = AFI_IP; afi <= AFI_IP6; afi++) { for (safi = SAFI_UNICAST; safi <= SAFI_MULTICAST; safi++) { - if (afi == AFI_IP6) - table = srcdest_table_init(); - else - table = route_table_init(); + table = srcdest_table_init(); + table->cleanup = zebra_stable_node_cleanup; info = XCALLOC(MTYPE_STATIC_RTABLE_INFO, sizeof(struct stable_info)); @@ -63,7 +61,6 @@ struct static_vrf *static_vrf_alloc(const char *name) info->safi = safi; route_table_set_info(table, info); - table->cleanup = zebra_stable_node_cleanup; svrf->stable[afi][safi] = table; } } diff --git a/staticd/static_vty.c b/staticd/static_vty.c index 07b8bc3d28..f93e81e8dc 100644 --- a/staticd/static_vty.c +++ b/staticd/static_vty.c @@ -27,6 +27,8 @@ #include "static_debug.h" #include "staticd/static_vty_clippy.c" #include "static_nb.h" +#include "static_srv6.h" +#include "static_zebra.h" #define STATICD_STR "Static route daemon\n" @@ -77,7 +79,7 @@ static int static_route_nb_run(struct vty *vty, struct static_route_args *args) char xpath_seg[XPATH_MAXLEN]; char ab_xpath[XPATH_MAXLEN]; char buf_prefix[PREFIX_STRLEN]; - char buf_src_prefix[PREFIX_STRLEN] = {}; + char buf_src_prefix[PREFIX_STRLEN] = "::/0"; char buf_nh_type[PREFIX_STRLEN] = {}; char buf_tag[PREFIX_STRLEN]; uint8_t label_stack_id = 0; @@ -114,6 +116,7 @@ static int static_route_nb_run(struct vty *vty, struct static_route_args *args) } assert(!!str2prefix(args->prefix, &p)); + src = (struct prefix){ .family = p.family, .prefixlen = 0 }; switch (args->afi) { case AFI_IP: @@ -144,7 +147,7 @@ static int static_route_nb_run(struct vty *vty, struct static_route_args *args) return CMD_WARNING_CONFIG_FAILED; } - if (args->source) + if (src.prefixlen) prefix2str(&src, buf_src_prefix, sizeof(buf_src_prefix)); if (args->gateway) buf_gate_str = args->gateway; @@ -181,25 +184,10 @@ static int static_route_nb_run(struct vty *vty, struct static_route_args *args) static_get_nh_type(type, buf_nh_type, sizeof(buf_nh_type)); if (!args->delete) { - if (args->source) - snprintf(ab_xpath, sizeof(ab_xpath), - FRR_DEL_S_ROUTE_SRC_NH_KEY_NO_DISTANCE_XPATH, - "frr-staticd:staticd", "staticd", args->vrf, - buf_prefix, - yang_afi_safi_value2identity(args->afi, - args->safi), - buf_src_prefix, table_id, buf_nh_type, - args->nexthop_vrf, buf_gate_str, - args->interface_name); - else - snprintf(ab_xpath, sizeof(ab_xpath), - FRR_DEL_S_ROUTE_NH_KEY_NO_DISTANCE_XPATH, - "frr-staticd:staticd", "staticd", args->vrf, - buf_prefix, - yang_afi_safi_value2identity(args->afi, - args->safi), - table_id, buf_nh_type, args->nexthop_vrf, - buf_gate_str, 
args->interface_name); + snprintf(ab_xpath, sizeof(ab_xpath), FRR_DEL_S_ROUTE_NH_KEY_NO_DISTANCE_XPATH, + "frr-staticd:staticd", "staticd", args->vrf, buf_prefix, buf_src_prefix, + yang_afi_safi_value2identity(args->afi, args->safi), table_id, buf_nh_type, + args->nexthop_vrf, buf_gate_str, args->interface_name); /* * If there's already the same nexthop but with a different @@ -216,22 +204,9 @@ static int static_route_nb_run(struct vty *vty, struct static_route_args *args) } /* route + path procesing */ - if (args->source) - snprintf(xpath_prefix, sizeof(xpath_prefix), - FRR_S_ROUTE_SRC_INFO_KEY_XPATH, - "frr-staticd:staticd", "staticd", args->vrf, - buf_prefix, - yang_afi_safi_value2identity(args->afi, - args->safi), - buf_src_prefix, table_id, distance); - else - snprintf(xpath_prefix, sizeof(xpath_prefix), - FRR_STATIC_ROUTE_INFO_KEY_XPATH, - "frr-staticd:staticd", "staticd", args->vrf, - buf_prefix, - yang_afi_safi_value2identity(args->afi, - args->safi), - table_id, distance); + snprintf(xpath_prefix, sizeof(xpath_prefix), FRR_STATIC_ROUTE_INFO_KEY_XPATH, + "frr-staticd:staticd", "staticd", args->vrf, buf_prefix, buf_src_prefix, + yang_afi_safi_value2identity(args->afi, args->safi), table_id, distance); nb_cli_enqueue_change(vty, xpath_prefix, NB_OP_CREATE, NULL); @@ -410,51 +385,18 @@ static int static_route_nb_run(struct vty *vty, struct static_route_args *args) if (orig_seg) XFREE(MTYPE_TMP, orig_seg); } else { - if (args->source) { - if (args->distance) - snprintf(ab_xpath, sizeof(ab_xpath), - FRR_DEL_S_ROUTE_SRC_NH_KEY_XPATH, - "frr-staticd:staticd", "staticd", - args->vrf, buf_prefix, - yang_afi_safi_value2identity( - args->afi, args->safi), - buf_src_prefix, table_id, distance, - buf_nh_type, args->nexthop_vrf, - buf_gate_str, args->interface_name); - else - snprintf( - ab_xpath, sizeof(ab_xpath), - FRR_DEL_S_ROUTE_SRC_NH_KEY_NO_DISTANCE_XPATH, - "frr-staticd:staticd", "staticd", - args->vrf, buf_prefix, - yang_afi_safi_value2identity( - args->afi, args->safi), - buf_src_prefix, table_id, buf_nh_type, - args->nexthop_vrf, buf_gate_str, - args->interface_name); - } else { - if (args->distance) - snprintf(ab_xpath, sizeof(ab_xpath), - FRR_DEL_S_ROUTE_NH_KEY_XPATH, - "frr-staticd:staticd", "staticd", - args->vrf, buf_prefix, - yang_afi_safi_value2identity( - args->afi, args->safi), - table_id, distance, buf_nh_type, - args->nexthop_vrf, buf_gate_str, - args->interface_name); - else - snprintf( - ab_xpath, sizeof(ab_xpath), - FRR_DEL_S_ROUTE_NH_KEY_NO_DISTANCE_XPATH, - "frr-staticd:staticd", "staticd", - args->vrf, buf_prefix, - yang_afi_safi_value2identity( - args->afi, args->safi), - table_id, buf_nh_type, - args->nexthop_vrf, buf_gate_str, - args->interface_name); - } + if (args->distance) + snprintf(ab_xpath, sizeof(ab_xpath), FRR_DEL_S_ROUTE_NH_KEY_XPATH, + "frr-staticd:staticd", "staticd", args->vrf, buf_prefix, + buf_src_prefix, yang_afi_safi_value2identity(args->afi, args->safi), + table_id, distance, buf_nh_type, args->nexthop_vrf, buf_gate_str, + args->interface_name); + else + snprintf(ab_xpath, sizeof(ab_xpath), + FRR_DEL_S_ROUTE_NH_KEY_NO_DISTANCE_XPATH, "frr-staticd:staticd", + "staticd", args->vrf, buf_prefix, buf_src_prefix, + yang_afi_safi_value2identity(args->afi, args->safi), table_id, + buf_nh_type, args->nexthop_vrf, buf_gate_str, args->interface_name); dnode = yang_dnode_get(vty->candidate_config->dnode, ab_xpath); if (!dnode) { @@ -1201,8 +1143,167 @@ DEFPY_YANG(ipv6_route_vrf, ipv6_route_vrf_cmd, return static_route_nb_run(vty, &args); } +DEFUN_NOSH 
(static_segment_routing, static_segment_routing_cmd, + "segment-routing", + "Segment Routing\n") +{ + VTY_PUSH_CONTEXT_NULL(SEGMENT_ROUTING_NODE); + return CMD_SUCCESS; +} + +DEFUN_NOSH (static_srv6, static_srv6_cmd, + "srv6", + "Segment Routing SRv6\n") +{ + VTY_PUSH_CONTEXT_NULL(SRV6_NODE); + return CMD_SUCCESS; +} + +DEFUN_YANG_NOSH (no_static_srv6, no_static_srv6_cmd, + "no srv6", + NO_STR + "Segment Routing SRv6\n") +{ + char xpath[XPATH_MAXLEN]; + + snprintf(xpath, sizeof(xpath), FRR_STATIC_SRV6_INFO_KEY_XPATH, "frr-staticd:staticd", + "staticd", VRF_DEFAULT_NAME); + + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, "%s", xpath); +} + +DEFUN_NOSH (static_srv6_sids, static_srv6_sids_cmd, + "static-sids", + "Segment Routing SRv6 SIDs\n") +{ + VTY_PUSH_CONTEXT_NULL(SRV6_SIDS_NODE); + return CMD_SUCCESS; +} + +DEFPY_YANG(srv6_sid, srv6_sid_cmd, + "sid X:X::X:X/M locator NAME$locator_name behavior <uN | uDT6 vrf VIEWVRFNAME | uDT4 vrf VIEWVRFNAME | uDT46 vrf VIEWVRFNAME>", + "Configure SRv6 SID\n" + "Specify SRv6 SID\n" + "Locator name\n" + "Specify Locator name\n" + "Specify SRv6 SID behavior\n" + "Apply the code to a uN SID\n" + "Apply the code to an uDT6 SID\n" + "Configure VRF name\n" + "Specify VRF name\n" + "Apply the code to an uDT4 SID\n" + "Configure VRF name\n" + "Specify VRF name\n" + "Apply the code to an uDT46 SID\n" + "Configure VRF name\n" + "Specify VRF name\n") +{ + enum srv6_endpoint_behavior_codepoint behavior = SRV6_ENDPOINT_BEHAVIOR_RESERVED; + int idx = 0; + const char *vrf_name = NULL; + char xpath_srv6[XPATH_MAXLEN]; + char xpath_sid[XPATH_MAXLEN]; + char xpath_behavior[XPATH_MAXLEN]; + char xpath_vrf_name[XPATH_MAXLEN]; + char xpath_locator_name[XPATH_MAXLEN]; + + if (argv_find(argv, argc, "uN", &idx)) { + behavior = SRV6_ENDPOINT_BEHAVIOR_END_NEXT_CSID; + } else if (argv_find(argv, argc, "uDT6", &idx)) { + behavior = SRV6_ENDPOINT_BEHAVIOR_END_DT6_USID; + vrf_name = argv[idx + 2]->arg; + } else if (argv_find(argv, argc, "uDT4", &idx)) { + behavior = SRV6_ENDPOINT_BEHAVIOR_END_DT4_USID; + vrf_name = argv[idx + 2]->arg; + } else if (argv_find(argv, argc, "uDT46", &idx)) { + behavior = SRV6_ENDPOINT_BEHAVIOR_END_DT46_USID; + vrf_name = argv[idx + 2]->arg; + } + + snprintf(xpath_srv6, sizeof(xpath_srv6), FRR_STATIC_SRV6_INFO_KEY_XPATH, + "frr-staticd:staticd", "staticd", VRF_DEFAULT_NAME); + + snprintf(xpath_sid, sizeof(xpath_sid), FRR_STATIC_SRV6_SID_KEY_XPATH, "frr-staticd:staticd", + "staticd", VRF_DEFAULT_NAME, sid_str); + + strlcpy(xpath_behavior, xpath_sid, sizeof(xpath_behavior)); + strlcat(xpath_behavior, FRR_STATIC_SRV6_SID_BEHAVIOR_XPATH, sizeof(xpath_behavior)); + + nb_cli_enqueue_change(vty, xpath_sid, NB_OP_CREATE, sid_str); + + nb_cli_enqueue_change(vty, xpath_behavior, NB_OP_MODIFY, + srv6_endpoint_behavior_codepoint2str(behavior)); + + if (vrf_name) { + strlcpy(xpath_vrf_name, xpath_sid, sizeof(xpath_vrf_name)); + strlcat(xpath_vrf_name, FRR_STATIC_SRV6_SID_VRF_NAME_XPATH, sizeof(xpath_vrf_name)); + + nb_cli_enqueue_change(vty, xpath_vrf_name, NB_OP_MODIFY, vrf_name); + } + + strlcpy(xpath_locator_name, xpath_sid, sizeof(xpath_locator_name)); + strlcat(xpath_locator_name, FRR_STATIC_SRV6_SID_LOCATOR_NAME_XPATH, + sizeof(xpath_locator_name)); + + nb_cli_enqueue_change(vty, xpath_locator_name, NB_OP_MODIFY, locator_name); + + return nb_cli_apply_changes(vty, "%s", xpath_sid); +} + +DEFPY_YANG(no_srv6_sid, no_srv6_sid_cmd, + "no sid X:X::X:X/M [locator NAME$locator_name] [behavior <uN | uDT6 vrf VIEWVRFNAME | 
uDT4 vrf VIEWVRFNAME | uDT46 vrf VIEWVRFNAME>]", + NO_STR + "Configure SRv6 SID\n" + "Specify SRv6 SID\n" + "Locator name\n" + "Specify Locator name\n" + "Specify SRv6 SID behavior\n" + "Apply the code to a uN SID\n" + "Apply the code to an uDT6 SID\n" + "Configure VRF name\n" + "Specify VRF name\n" + "Apply the code to an uDT4 SID\n" + "Configure VRF name\n" + "Specify VRF name\n" + "Apply the code to an uDT46 SID\n" + "Configure VRF name\n" + "Specify VRF name\n") +{ + char xpath[XPATH_MAXLEN + 37]; + + snprintf(xpath, sizeof(xpath), FRR_STATIC_SRV6_SID_KEY_XPATH, "frr-staticd:staticd", + "staticd", VRF_DEFAULT_NAME, sid_str); + + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); +} + #ifdef INCLUDE_MGMTD_CMDDEFS_ONLY +static struct cmd_node sr_node = { + .name = "sr", + .node = SEGMENT_ROUTING_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-sr)# ", +}; + +static struct cmd_node srv6_node = { + .name = "srv6", + .node = SRV6_NODE, + .parent_node = SEGMENT_ROUTING_NODE, + .prompt = "%s(config-srv6)# ", +}; + +static struct cmd_node srv6_sids_node = { + .name = "srv6-sids", + .node = SRV6_SIDS_NODE, + .parent_node = SRV6_NODE, + .prompt = "%s(config-srv6-sids)# ", +}; + static void static_cli_show(struct vty *vty, const struct lyd_node *dnode, bool show_defaults) { @@ -1278,9 +1379,8 @@ static int srv6_seg_iter_cb(const struct lyd_node *dnode, void *arg) } static void nexthop_cli_show(struct vty *vty, const struct lyd_node *route, - const struct lyd_node *src, - const struct lyd_node *path, - const struct lyd_node *nexthop, bool show_defaults) + const struct lyd_node *path, const struct lyd_node *nexthop, + bool show_defaults) { const char *vrf; const char *afi_safi; @@ -1294,6 +1394,7 @@ static void nexthop_cli_show(struct vty *vty, const struct lyd_node *route, struct srv6_seg_iter seg_iter; const char *nexthop_vrf; uint32_t table_id; + struct prefix src_prefix; bool onlink; vrf = yang_dnode_get_string(route, "../../vrf"); @@ -1315,9 +1416,9 @@ static void nexthop_cli_show(struct vty *vty, const struct lyd_node *route, vty_out(vty, " %s", yang_dnode_get_string(route, "prefix")); - if (src) - vty_out(vty, " from %s", - yang_dnode_get_string(src, "src-prefix")); + yang_dnode_get_prefix(&src_prefix, route, "src-prefix"); + if (src_prefix.prefixlen) + vty_out(vty, " from %pFX", &src_prefix); nh_type = yang_dnode_get_enum(nexthop, "nh-type"); switch (nh_type) { @@ -1421,18 +1522,7 @@ static void static_nexthop_cli_show(struct vty *vty, const struct lyd_node *route = yang_dnode_get_parent(path, "route-list"); - nexthop_cli_show(vty, route, NULL, path, dnode, show_defaults); -} - -static void static_src_nexthop_cli_show(struct vty *vty, - const struct lyd_node *dnode, - bool show_defaults) -{ - const struct lyd_node *path = yang_dnode_get_parent(dnode, "path-list"); - const struct lyd_node *src = yang_dnode_get_parent(path, "src-list"); - const struct lyd_node *route = yang_dnode_get_parent(src, "route-list"); - - nexthop_cli_show(vty, route, src, path, dnode, show_defaults); + nexthop_cli_show(vty, route, path, dnode, show_defaults); } static int static_nexthop_cli_cmp(const struct lyd_node *dnode1, @@ -1497,6 +1587,8 @@ static int static_route_list_cli_cmp(const struct lyd_node *dnode1, afi_t afi1, afi2; safi_t safi1, safi2; struct prefix prefix1, prefix2; + struct prefix src_prefix1, src_prefix2; + int rv; afi_safi1 = yang_dnode_get_string(dnode1, "afi-safi"); yang_afi_safi_identity2value(afi_safi1, &afi1, &safi1); @@ -1512,19 
+1604,13 @@ static int static_route_list_cli_cmp(const struct lyd_node *dnode1, yang_dnode_get_prefix(&prefix1, dnode1, "prefix"); yang_dnode_get_prefix(&prefix2, dnode2, "prefix"); + rv = prefix_cmp(&prefix1, &prefix2); + if (rv) + return rv; - return prefix_cmp(&prefix1, &prefix2); -} - -static int static_src_list_cli_cmp(const struct lyd_node *dnode1, - const struct lyd_node *dnode2) -{ - struct prefix prefix1, prefix2; - - yang_dnode_get_prefix(&prefix1, dnode1, "src-prefix"); - yang_dnode_get_prefix(&prefix2, dnode2, "src-prefix"); - - return prefix_cmp(&prefix1, &prefix2); + yang_dnode_get_prefix(&src_prefix1, dnode1, "src-prefix"); + yang_dnode_get_prefix(&src_prefix2, dnode2, "src-prefix"); + return prefix_cmp(&src_prefix1, &src_prefix2); } static int static_path_list_cli_cmp(const struct lyd_node *dnode1, @@ -1545,6 +1631,100 @@ static int static_path_list_cli_cmp(const struct lyd_node *dnode1, return (int)distance1 - (int)distance2; } +static void static_segment_routing_cli_show(struct vty *vty, const struct lyd_node *dnode, + bool show_defaults) +{ + vty_out(vty, "segment-routing\n"); +} + +static void static_segment_routing_cli_show_end(struct vty *vty, const struct lyd_node *dnode) +{ + vty_out(vty, "exit\n"); + vty_out(vty, "!\n"); +} + +static void static_srv6_cli_show(struct vty *vty, const struct lyd_node *dnode, bool show_defaults) +{ + vty_out(vty, " srv6\n"); +} + +static void static_srv6_cli_show_end(struct vty *vty, const struct lyd_node *dnode) +{ + vty_out(vty, " exit\n"); + vty_out(vty, " !\n"); +} + +static void static_sids_cli_show(struct vty *vty, const struct lyd_node *dnode, bool show_defaults) +{ + vty_out(vty, " static-sids\n"); +} + +static void static_sids_cli_show_end(struct vty *vty, const struct lyd_node *dnode) +{ + vty_out(vty, " exit\n"); + vty_out(vty, " !\n"); +} + +static void srv6_sid_cli_show(struct vty *vty, const struct lyd_node *sid, bool show_defaults) +{ + enum srv6_endpoint_behavior_codepoint srv6_behavior; + struct prefix_ipv6 sid_value; + + yang_dnode_get_ipv6p(&sid_value, sid, "sid"); + + vty_out(vty, " sid %pFX", &sid_value); + vty_out(vty, " locator %s", yang_dnode_get_string(sid, "locator-name")); + + srv6_behavior = yang_dnode_get_enum(sid, "behavior"); + switch (srv6_behavior) { + case SRV6_ENDPOINT_BEHAVIOR_END: + vty_out(vty, " behavior End"); + break; + case SRV6_ENDPOINT_BEHAVIOR_END_X: + vty_out(vty, " behavior End.X"); + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT6: + vty_out(vty, " behavior End.DT6"); + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT4: + vty_out(vty, " behavior End.DT4"); + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT46: + vty_out(vty, " behavior End.DT46"); + break; + case SRV6_ENDPOINT_BEHAVIOR_END_NEXT_CSID: + vty_out(vty, " behavior uN"); + break; + case SRV6_ENDPOINT_BEHAVIOR_END_X_NEXT_CSID: + vty_out(vty, " behavior uA"); + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT6_USID: + vty_out(vty, " behavior uDT6"); + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT4_USID: + vty_out(vty, " behavior uDT4"); + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT46_USID: + vty_out(vty, " behavior uDT46"); + break; + case SRV6_ENDPOINT_BEHAVIOR_RESERVED: + case SRV6_ENDPOINT_BEHAVIOR_OPAQUE: + vty_out(vty, " behavior unknown"); + break; + } + + if (yang_dnode_exists(sid, "vrf-name")) + vty_out(vty, " vrf %s", yang_dnode_get_string(sid, "vrf-name")); + + vty_out(vty, "\n"); +} + +static void static_srv6_sid_cli_show(struct vty *vty, const struct lyd_node *dnode, + bool show_defaults) +{ + srv6_sid_cli_show(vty, dnode, show_defaults); 
+} + const struct frr_yang_module_info frr_staticd_cli_info = { .name = "frr-staticd", .ignore_cfg_cbs = true, @@ -1576,22 +1756,30 @@ const struct frr_yang_module_info frr_staticd_cli_info = { } }, { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list", + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing", .cbs = { - .cli_cmp = static_src_list_cli_cmp, + .cli_show = static_segment_routing_cli_show, + .cli_show_end = static_segment_routing_cli_show_end, } }, { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list", + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing/srv6", .cbs = { - .cli_cmp = static_path_list_cli_cmp, + .cli_show = static_srv6_cli_show, + .cli_show_end = static_srv6_cli_show_end, } }, { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop", + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing/srv6/static-sids", .cbs = { - .cli_show = static_src_nexthop_cli_show, - .cli_cmp = static_nexthop_cli_cmp, + .cli_show = static_sids_cli_show, + .cli_show_end = static_sids_cli_show_end, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing/srv6/static-sids/sid", + .cbs = { + .cli_show = static_srv6_sid_cli_show, } }, { @@ -1603,17 +1791,18 @@ const struct frr_yang_module_info frr_staticd_cli_info = { #else /* ifdef INCLUDE_MGMTD_CMDDEFS_ONLY */ DEFPY_YANG(debug_staticd, debug_staticd_cmd, - "[no] debug static [{events$events|route$route|bfd$bfd}]", + "[no] debug static [{events$events|route$route|bfd$bfd|srv6$srv6}]", NO_STR DEBUG_STR STATICD_STR "Debug events\n" "Debug route\n" - "Debug bfd\n") + "Debug bfd\n" + "Debug srv6\n") { /* If no specific category, change all */ if (strmatch(argv[argc - 1]->text, "static")) - static_debug_set(vty->node, !no, true, true, true); + static_debug_set(vty->node, !no, true, true, true, true); else - static_debug_set(vty->node, !no, !!events, !!route, !!bfd); + static_debug_set(vty->node, !no, !!events, !!route, !!bfd, !!srv6); return CMD_SUCCESS; } @@ -1669,6 +1858,21 @@ void static_vty_init(void) install_element(VRF_NODE, &ipv6_route_address_interface_vrf_cmd); install_element(CONFIG_NODE, &ipv6_route_cmd); install_element(VRF_NODE, &ipv6_route_vrf_cmd); + + install_node(&sr_node); + install_node(&srv6_node); + install_node(&srv6_sids_node); + install_default(SEGMENT_ROUTING_NODE); + install_default(SRV6_NODE); + install_default(SRV6_SIDS_NODE); + + install_element(CONFIG_NODE, &static_segment_routing_cmd); + install_element(SEGMENT_ROUTING_NODE, &static_srv6_cmd); + install_element(SEGMENT_ROUTING_NODE, &no_static_srv6_cmd); + install_element(SRV6_NODE, &static_srv6_sids_cmd); + install_element(SRV6_SIDS_NODE, &srv6_sid_cmd); + install_element(SRV6_SIDS_NODE, &no_srv6_sid_cmd); + #endif /* ifndef INCLUDE_MGMTD_CMDDEFS_ONLY */ #ifndef INCLUDE_MGMTD_CMDDEFS_ONLY diff --git a/staticd/static_zebra.c b/staticd/static_zebra.c index d76befc131..6da2dfec90 100644 --- a/staticd/static_zebra.c +++ b/staticd/static_zebra.c @@ -30,6 +30,9 @@ #include "static_nht.h" #include "static_vty.h" #include "static_debug.h" +#include "zclient.h" +#include 
"static_srv6.h" +#include "lib_errors.h" DEFINE_MTYPE_STATIC(STATIC, STATIC_NHT_DATA, "Static Nexthop tracking data"); PREDECL_HASH(static_nht_hash); @@ -113,6 +116,8 @@ static int static_ifp_up(struct interface *ifp) { static_ifindex_update(ifp, true); + static_ifp_srv6_sids_update(ifp, true); + return 0; } @@ -120,40 +125,44 @@ static int static_ifp_down(struct interface *ifp) { static_ifindex_update(ifp, false); + static_ifp_srv6_sids_update(ifp, false); + return 0; } static int route_notify_owner(ZAPI_CALLBACK_ARGS) { - struct prefix p; + struct prefix p, src_p, *src_pp; enum zapi_route_notify_owner note; uint32_t table_id; safi_t safi; - if (!zapi_route_notify_decode(zclient->ibuf, &p, &table_id, ¬e, NULL, - &safi)) + if (!zapi_route_notify_decode_srcdest(zclient->ibuf, &p, &src_p, &table_id, ¬e, NULL, + &safi)) return -1; + src_pp = src_p.prefixlen ? &src_p : NULL; + switch (note) { case ZAPI_ROUTE_FAIL_INSTALL: - static_nht_mark_state(&p, safi, vrf_id, STATIC_NOT_INSTALLED); + static_nht_mark_state(&p, src_pp, safi, vrf_id, STATIC_NOT_INSTALLED); zlog_warn("%s: Route %pFX failed to install for table: %u", __func__, &p, table_id); break; case ZAPI_ROUTE_BETTER_ADMIN_WON: - static_nht_mark_state(&p, safi, vrf_id, STATIC_NOT_INSTALLED); + static_nht_mark_state(&p, src_pp, safi, vrf_id, STATIC_NOT_INSTALLED); zlog_warn( "%s: Route %pFX over-ridden by better route for table: %u", __func__, &p, table_id); break; case ZAPI_ROUTE_INSTALLED: - static_nht_mark_state(&p, safi, vrf_id, STATIC_INSTALLED); + static_nht_mark_state(&p, src_pp, safi, vrf_id, STATIC_INSTALLED); break; case ZAPI_ROUTE_REMOVED: - static_nht_mark_state(&p, safi, vrf_id, STATIC_NOT_INSTALLED); + static_nht_mark_state(&p, src_pp, safi, vrf_id, STATIC_NOT_INSTALLED); break; case ZAPI_ROUTE_REMOVE_FAIL: - static_nht_mark_state(&p, safi, vrf_id, STATIC_INSTALLED); + static_nht_mark_state(&p, src_pp, safi, vrf_id, STATIC_INSTALLED); zlog_warn("%s: Route %pFX failure to remove for table: %u", __func__, &p, table_id); break; @@ -219,8 +228,8 @@ static void static_zebra_nexthop_update(struct vrf *vrf, struct prefix *matched, nhtd->nh_num = nhr->nexthop_num; static_nht_reset_start(matched, afi, nhr->safi, nhtd->nh_vrf_id); - static_nht_update(NULL, matched, nhr->nexthop_num, afi, - nhr->safi, nhtd->nh_vrf_id); + static_nht_update(NULL, NULL, matched, nhr->nexthop_num, afi, nhr->safi, + nhtd->nh_vrf_id); } else zlog_err("No nhtd?"); } @@ -305,10 +314,13 @@ void static_zebra_nht_register(struct static_nexthop *nh, bool reg) { struct static_path *pn = nh->pn; struct route_node *rn = pn->rn; + const struct prefix *p, *src_p; struct static_route_info *si = static_route_info_from_rnode(rn); struct static_nht_data *nhtd, lookup = {}; uint32_t cmd; + srcdest_rnode_prefixes(rn, &p, &src_p); + if (!static_zebra_nht_get_prefix(nh, &lookup.nh)) return; lookup.nh_vrf_id = nh->nh_vrf_id; @@ -344,8 +356,8 @@ void static_zebra_nht_register(struct static_nexthop *nh, bool reg) if (nh->state == STATIC_NOT_INSTALLED || nh->state == STATIC_SENT_TO_ZEBRA) nh->state = STATIC_START; - static_nht_update(&rn->p, &nhtd->nh, nhtd->nh_num, afi, - si->safi, nh->nh_vrf_id); + static_nht_update(p, src_p, &nhtd->nh, nhtd->nh_num, afi, si->safi, + nh->nh_vrf_id); return; } @@ -530,10 +542,673 @@ extern void static_zebra_route_add(struct static_path *pn, bool install) zclient, &api); } +/** + * Send SRv6 SID to ZEBRA for installation or deletion. 
+ * + * @param cmd ZEBRA_ROUTE_ADD or ZEBRA_ROUTE_DELETE + * @param sid SRv6 SID to install or delete + * @param prefixlen Prefix length + * @param oif Outgoing interface + * @param action SID action + * @param context SID context + */ +static void static_zebra_send_localsid(int cmd, const struct in6_addr *sid, uint16_t prefixlen, + ifindex_t oif, enum seg6local_action_t action, + const struct seg6local_context *context) +{ + struct prefix_ipv6 p = {}; + struct zapi_route api = {}; + struct zapi_nexthop *znh; + + if (cmd != ZEBRA_ROUTE_ADD && cmd != ZEBRA_ROUTE_DELETE) { + flog_warn(EC_LIB_DEVELOPMENT, "%s: wrong ZEBRA command", __func__); + return; + } + + if (prefixlen > IPV6_MAX_BITLEN) { + flog_warn(EC_LIB_DEVELOPMENT, "%s: wrong prefixlen %u", __func__, prefixlen); + return; + } + + DEBUGD(&static_dbg_srv6, "%s: |- %s SRv6 SID %pI6 behavior %s", __func__, + cmd == ZEBRA_ROUTE_ADD ? "Add" : "Delete", sid, seg6local_action2str(action)); + + p.family = AF_INET6; + p.prefixlen = prefixlen; + p.prefix = *sid; + + api.vrf_id = VRF_DEFAULT; + api.type = ZEBRA_ROUTE_STATIC; + api.instance = 0; + api.safi = SAFI_UNICAST; + memcpy(&api.prefix, &p, sizeof(p)); + + if (cmd == ZEBRA_ROUTE_DELETE) + return (void)zclient_route_send(ZEBRA_ROUTE_DELETE, zclient, &api); + + SET_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION); + SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP); + + znh = &api.nexthops[0]; + + memset(znh, 0, sizeof(*znh)); + + znh->type = NEXTHOP_TYPE_IFINDEX; + znh->ifindex = oif; + SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL); + znh->seg6local_action = action; + memcpy(&znh->seg6local_ctx, context, sizeof(struct seg6local_context)); + + api.nexthop_num = 1; + + zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api); +} + +/** + * Install SRv6 SID in the forwarding plane through Zebra. 
+ * + * @param sid SRv6 SID + */ +void static_zebra_srv6_sid_install(struct static_srv6_sid *sid) +{ + enum seg6local_action_t action = ZEBRA_SEG6_LOCAL_ACTION_UNSPEC; + struct seg6local_context ctx = {}; + struct interface *ifp = NULL; + struct vrf *vrf; + + if (!sid) + return; + + if (CHECK_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA)) + return; + + if (!sid->locator) { + zlog_err("Failed to install SID %pFX: missing locator information", &sid->addr); + return; + } + + switch (sid->behavior) { + case SRV6_ENDPOINT_BEHAVIOR_END: + action = ZEBRA_SEG6_LOCAL_ACTION_END; + break; + case SRV6_ENDPOINT_BEHAVIOR_END_NEXT_CSID: + action = ZEBRA_SEG6_LOCAL_ACTION_END; + SET_SRV6_FLV_OP(ctx.flv.flv_ops, ZEBRA_SEG6_LOCAL_FLV_OP_NEXT_CSID); + ctx.flv.lcblock_len = sid->locator->block_bits_length; + ctx.flv.lcnode_func_len = sid->locator->node_bits_length; + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT6: + case SRV6_ENDPOINT_BEHAVIOR_END_DT6_USID: + action = ZEBRA_SEG6_LOCAL_ACTION_END_DT6; + vrf = vrf_lookup_by_name(sid->attributes.vrf_name); + if (!vrf_is_enabled(vrf)) { + zlog_warn("Failed to install SID %pFX: VRF %s is inactive", &sid->addr, + sid->attributes.vrf_name); + return; + } + ctx.table = vrf->data.l.table_id; + ifp = if_get_vrf_loopback(vrf->vrf_id); + if (!ifp) { + zlog_warn("Failed to install SID %pFX: failed to get loopback for vrf %s", + &sid->addr, sid->attributes.vrf_name); + return; + } + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT4: + case SRV6_ENDPOINT_BEHAVIOR_END_DT4_USID: + action = ZEBRA_SEG6_LOCAL_ACTION_END_DT4; + vrf = vrf_lookup_by_name(sid->attributes.vrf_name); + if (!vrf_is_enabled(vrf)) { + zlog_warn("Failed to install SID %pFX: VRF %s is inactive", &sid->addr, + sid->attributes.vrf_name); + return; + } + ctx.table = vrf->data.l.table_id; + ifp = if_get_vrf_loopback(vrf->vrf_id); + if (!ifp) { + zlog_warn("Failed to install SID %pFX: failed to get loopback for vrf %s", + &sid->addr, sid->attributes.vrf_name); + return; + } + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT46: + case SRV6_ENDPOINT_BEHAVIOR_END_DT46_USID: + action = ZEBRA_SEG6_LOCAL_ACTION_END_DT46; + vrf = vrf_lookup_by_name(sid->attributes.vrf_name); + if (!vrf_is_enabled(vrf)) { + zlog_warn("Failed to install SID %pFX: VRF %s is inactive", &sid->addr, + sid->attributes.vrf_name); + return; + } + ctx.table = vrf->data.l.table_id; + ifp = if_get_vrf_loopback(vrf->vrf_id); + if (!ifp) { + zlog_warn("Failed to install SID %pFX: failed to get loopback for vrf %s", + &sid->addr, sid->attributes.vrf_name); + return; + } + break; + case SRV6_ENDPOINT_BEHAVIOR_END_X: + case SRV6_ENDPOINT_BEHAVIOR_END_X_NEXT_CSID: + case SRV6_ENDPOINT_BEHAVIOR_OPAQUE: + case SRV6_ENDPOINT_BEHAVIOR_RESERVED: + zlog_warn("unsupported behavior: %u", sid->behavior); + break; + } + + ctx.block_len = sid->locator->block_bits_length; + ctx.node_len = sid->locator->node_bits_length; + ctx.function_len = sid->locator->function_bits_length; + ctx.argument_len = sid->locator->argument_bits_length; + + /* Attach the SID to the SRv6 interface */ + if (!ifp) { + ifp = if_lookup_by_name(DEFAULT_SRV6_IFNAME, VRF_DEFAULT); + if (!ifp) { + zlog_warn("Failed to install SRv6 SID %pFX: %s interface not found", + &sid->addr, DEFAULT_SRV6_IFNAME); + return; + } + } + + /* Send the SID to zebra */ + static_zebra_send_localsid(ZEBRA_ROUTE_ADD, &sid->addr.prefix, sid->addr.prefixlen, + ifp->ifindex, action, &ctx); + + SET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA); +} + +void static_zebra_srv6_sid_uninstall(struct static_srv6_sid *sid) +{ 
+ enum seg6local_action_t action = ZEBRA_SEG6_LOCAL_ACTION_UNSPEC; + struct interface *ifp = NULL; + struct seg6local_context ctx = {}; + struct vrf *vrf; + + if (!sid) + return; + + if (!CHECK_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA)) + return; + + if (!sid->locator) { + zlog_err("Failed to uninstall SID %pFX: missing locator information", &sid->addr); + return; + } + + switch (sid->behavior) { + case SRV6_ENDPOINT_BEHAVIOR_END: + case SRV6_ENDPOINT_BEHAVIOR_END_NEXT_CSID: + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT6: + case SRV6_ENDPOINT_BEHAVIOR_END_DT6_USID: + vrf = vrf_lookup_by_name(sid->attributes.vrf_name); + if (!vrf_is_enabled(vrf)) { + zlog_warn("Failed to uninstall SID %pFX: VRF %s is inactive", &sid->addr, + sid->attributes.vrf_name); + return; + } + ifp = if_get_vrf_loopback(vrf->vrf_id); + if (!ifp) { + zlog_warn("Failed to uninstall SID %pFX: failed to get loopback for vrf %s", + &sid->addr, sid->attributes.vrf_name); + return; + } + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT4: + case SRV6_ENDPOINT_BEHAVIOR_END_DT4_USID: + vrf = vrf_lookup_by_name(sid->attributes.vrf_name); + if (!vrf_is_enabled(vrf)) { + zlog_warn("Failed to uninstall SID %pFX: VRF %s is inactive", &sid->addr, + sid->attributes.vrf_name); + return; + } + ifp = if_get_vrf_loopback(vrf->vrf_id); + if (!ifp) { + zlog_warn("Failed to uninstall SID %pFX: failed to get loopback for vrf %s", + &sid->addr, sid->attributes.vrf_name); + return; + } + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT46: + case SRV6_ENDPOINT_BEHAVIOR_END_DT46_USID: + vrf = vrf_lookup_by_name(sid->attributes.vrf_name); + if (!vrf_is_enabled(vrf)) { + zlog_warn("Failed to uninstall SID %pFX: VRF %s is inactive", &sid->addr, + sid->attributes.vrf_name); + return; + } + ifp = if_get_vrf_loopback(vrf->vrf_id); + if (!ifp) { + zlog_warn("Failed to uninstall SID %pFX: failed to get loopback for vrf %s", + &sid->addr, sid->attributes.vrf_name); + return; + } + break; + case SRV6_ENDPOINT_BEHAVIOR_END_X: + case SRV6_ENDPOINT_BEHAVIOR_END_X_NEXT_CSID: + case SRV6_ENDPOINT_BEHAVIOR_OPAQUE: + case SRV6_ENDPOINT_BEHAVIOR_RESERVED: + zlog_warn("unsupported behavior: %u", sid->behavior); + break; + } + + /* The SID is attached to the SRv6 interface */ + if (!ifp) { + ifp = if_lookup_by_name(DEFAULT_SRV6_IFNAME, VRF_DEFAULT); + if (!ifp) { + zlog_warn("%s interface not found: nothing to uninstall", + DEFAULT_SRV6_IFNAME); + return; + } + } + + ctx.block_len = sid->locator->block_bits_length; + ctx.node_len = sid->locator->node_bits_length; + ctx.function_len = sid->locator->function_bits_length; + ctx.argument_len = sid->locator->argument_bits_length; + + static_zebra_send_localsid(ZEBRA_ROUTE_DELETE, &sid->addr.prefix, sid->addr.prefixlen, + ifp->ifindex, action, &ctx); + + UNSET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA); +} + +extern void static_zebra_request_srv6_sid(struct static_srv6_sid *sid) +{ + struct srv6_sid_ctx ctx = {}; + int ret = 0; + struct vrf *vrf; + + if (!sid) + return; + + /* convert `srv6_endpoint_behavior_codepoint` to `seg6local_action_t` */ + switch (sid->behavior) { + case SRV6_ENDPOINT_BEHAVIOR_END: + case SRV6_ENDPOINT_BEHAVIOR_END_NEXT_CSID: + ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END; + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT6: + case SRV6_ENDPOINT_BEHAVIOR_END_DT6_USID: + ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_DT6; + /* process SRv6 SID attributes */ + /* generate table ID from the VRF name, if configured */ + if (sid->attributes.vrf_name[0] != '\0') { + vrf =
vrf_lookup_by_name(sid->attributes.vrf_name); + if (!vrf_is_enabled(vrf)) + return; + ctx.vrf_id = vrf->vrf_id; + } + + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT4: + case SRV6_ENDPOINT_BEHAVIOR_END_DT4_USID: + ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_DT4; + /* process SRv6 SID attributes */ + /* generate table ID from the VRF name, if configured */ + if (sid->attributes.vrf_name[0] != '\0') { + vrf = vrf_lookup_by_name(sid->attributes.vrf_name); + if (!vrf_is_enabled(vrf)) + return; + ctx.vrf_id = vrf->vrf_id; + } + + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT46: + case SRV6_ENDPOINT_BEHAVIOR_END_DT46_USID: + ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_DT46; + /* process SRv6 SID attributes */ + /* generate table ID from the VRF name, if configured */ + if (sid->attributes.vrf_name[0] != '\0') { + vrf = vrf_lookup_by_name(sid->attributes.vrf_name); + if (!vrf_is_enabled(vrf)) + return; + ctx.vrf_id = vrf->vrf_id; + } + + break; + case SRV6_ENDPOINT_BEHAVIOR_END_X: + case SRV6_ENDPOINT_BEHAVIOR_END_X_NEXT_CSID: + case SRV6_ENDPOINT_BEHAVIOR_OPAQUE: + case SRV6_ENDPOINT_BEHAVIOR_RESERVED: + zlog_warn("unsupported behavior: %u", sid->behavior); + return; + } + + /* Request SRv6 SID from SID Manager */ + ret = srv6_manager_get_sid(zclient, &ctx, &sid->addr.prefix, sid->locator->name, NULL); + if (ret < 0) + zlog_warn("%s: error getting SRv6 SID!", __func__); +} + +extern void static_zebra_release_srv6_sid(struct static_srv6_sid *sid) +{ + struct srv6_sid_ctx ctx = {}; + struct vrf *vrf; + int ret = 0; + + if (!sid || !CHECK_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_VALID)) + return; + + /* convert `srv6_endpoint_behavior_codepoint` to `seg6local_action_t` */ + switch (sid->behavior) { + case SRV6_ENDPOINT_BEHAVIOR_END: + case SRV6_ENDPOINT_BEHAVIOR_END_NEXT_CSID: + ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END; + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT6: + case SRV6_ENDPOINT_BEHAVIOR_END_DT6_USID: + ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_DT6; + /* process SRv6 SID attributes */ + /* generate table ID from the VRF name, if configured */ + if (sid->attributes.vrf_name[0] != '\0') { + vrf = vrf_lookup_by_name(sid->attributes.vrf_name); + if (!vrf_is_enabled(vrf)) + return; + ctx.vrf_id = vrf->vrf_id; + } + + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT4: + case SRV6_ENDPOINT_BEHAVIOR_END_DT4_USID: + ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_DT4; + /* process SRv6 SID attributes */ + /* generate table ID from the VRF name, if configured */ + if (sid->attributes.vrf_name[0] != '\0') { + vrf = vrf_lookup_by_name(sid->attributes.vrf_name); + if (!vrf_is_enabled(vrf)) + return; + ctx.vrf_id = vrf->vrf_id; + } + + break; + case SRV6_ENDPOINT_BEHAVIOR_END_DT46: + case SRV6_ENDPOINT_BEHAVIOR_END_DT46_USID: + ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_DT46; + /* process SRv6 SID attributes */ + /* generate table ID from the VRF name, if configured */ + if (sid->attributes.vrf_name[0] != '\0') { + vrf = vrf_lookup_by_name(sid->attributes.vrf_name); + if (!vrf_is_enabled(vrf)) + return; + ctx.vrf_id = vrf->vrf_id; + } + + break; + case SRV6_ENDPOINT_BEHAVIOR_END_X: + case SRV6_ENDPOINT_BEHAVIOR_END_X_NEXT_CSID: + case SRV6_ENDPOINT_BEHAVIOR_OPAQUE: + case SRV6_ENDPOINT_BEHAVIOR_RESERVED: + zlog_warn("unsupported behavior: %u", sid->behavior); + return; + } + + /* remove the SRv6 SID from the zebra RIB */ + ret = srv6_manager_release_sid(zclient, &ctx); + if (ret == ZCLIENT_SEND_FAILURE) + flog_err(EC_LIB_ZAPI_SOCKET, "zclient_send_get_srv6_sid() delete failed: %s", + safe_strerror(errno)); +} + +/** + 
* Ask the SRv6 Manager (zebra) about a specific locator + * + * @param name Locator name + * @return 0 on success, -1 otherwise + */ +int static_zebra_srv6_manager_get_locator(const char *name) +{ + if (!name) + return -1; + + /* + * Send the Get Locator request to the SRv6 Manager and return the + * result + */ + return srv6_manager_get_locator(zclient, name); +} + +static void request_srv6_sids(struct static_srv6_locator *locator) +{ + struct static_srv6_sid *sid; + struct listnode *node; + + for (ALL_LIST_ELEMENTS_RO(srv6_sids, node, sid)) { + if (sid->locator == locator) + static_zebra_request_srv6_sid(sid); + } +} + +/** + * Internal function to process an SRv6 locator + * + * @param locator The locator to be processed + */ +static int static_zebra_process_srv6_locator_internal(struct srv6_locator *locator) +{ + struct static_srv6_locator *loc; + struct listnode *node; + struct static_srv6_sid *sid; + + if (!locator) + return -1; + + DEBUGD(&static_dbg_srv6, + "%s: Received SRv6 locator %s %pFX, loc-block-len=%u, loc-node-len=%u func-len=%u, arg-len=%u", + __func__, locator->name, &locator->prefix, locator->block_bits_length, + locator->node_bits_length, locator->function_bits_length, + locator->argument_bits_length); + + /* If we are already aware about the locator, nothing to do */ + loc = static_srv6_locator_lookup(locator->name); + if (loc) + return 0; + + loc = static_srv6_locator_alloc(locator->name); + + DEBUGD(&static_dbg_srv6, "%s: SRv6 locator (locator %s, prefix %pFX) set", __func__, + locator->name, &locator->prefix); + + /* Store the locator prefix */ + loc->prefix = locator->prefix; + loc->block_bits_length = locator->block_bits_length; + loc->node_bits_length = locator->node_bits_length; + loc->function_bits_length = locator->function_bits_length; + loc->argument_bits_length = locator->argument_bits_length; + loc->flags = locator->flags; + + listnode_add(srv6_locators, loc); + + for (ALL_LIST_ELEMENTS_RO(srv6_sids, node, sid)) { + if (strncmp(sid->locator_name, loc->name, sizeof(loc->name)) == 0) + sid->locator = loc; + } + + /* Request SIDs from the locator */ + request_srv6_sids(loc); + + return 0; +} + +/** + * Callback to process an SRv6 locator received from SRv6 Manager (zebra). + * + * @result 0 on success, -1 otherwise + */ +static int static_zebra_process_srv6_locator_add(ZAPI_CALLBACK_ARGS) +{ + struct srv6_locator loc = {}; + + if (!srv6_locators) + return -1; + + /* Decode the SRv6 locator */ + if (zapi_srv6_locator_decode(zclient->ibuf, &loc) < 0) + return -1; + + return static_zebra_process_srv6_locator_internal(&loc); +} + +/** + * Callback to process a notification from SRv6 Manager (zebra) of an SRv6 + * locator deleted. 
+ * + * @result 0 on success, -1 otherwise + */ +static int static_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) +{ + struct srv6_locator loc = {}; + struct listnode *node2, *nnode2; + struct static_srv6_sid *sid; + struct static_srv6_locator *locator; + + if (!srv6_locators) + return -1; + + /* Decode the received zebra message */ + if (zapi_srv6_locator_decode(zclient->ibuf, &loc) < 0) + return -1; + + DEBUGD(&static_dbg_srv6, + "%s: SRv6 locator deleted in zebra: name %s, prefix %pFX, block_len %u, node_len %u, func_len %u, arg_len %u", + __func__, loc.name, &loc.prefix, loc.block_bits_length, loc.node_bits_length, + loc.function_bits_length, loc.argument_bits_length); + + locator = static_srv6_locator_lookup(loc.name); + if (!locator) + return 0; + + DEBUGD(&static_dbg_srv6, "%s: Deleting srv6 sids from locator %s", __func__, locator->name); + + /* Delete SRv6 SIDs */ + for (ALL_LIST_ELEMENTS(srv6_sids, node2, nnode2, sid)) { + if (sid->locator != locator) + continue; + + + DEBUGD(&static_dbg_srv6, "%s: Deleting SRv6 SID (locator %s, sid %pFX)", __func__, + locator->name, &sid->addr); + + /* + * Uninstall the SRv6 SID from the forwarding plane + * through Zebra + */ + if (CHECK_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA)) { + static_zebra_srv6_sid_uninstall(sid); + UNSET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA); + } + } + + listnode_delete(srv6_locators, locator); + static_srv6_locator_free(locator); + + return 0; +} + +static int static_zebra_srv6_sid_notify(ZAPI_CALLBACK_ARGS) +{ + struct srv6_sid_ctx ctx; + struct in6_addr sid_addr; + enum zapi_srv6_sid_notify note; + uint32_t sid_func; + struct listnode *node; + char buf[256]; + struct static_srv6_sid *sid = NULL; + char *loc_name; + bool found = false; + + if (!srv6_locators) + return -1; + + /* Decode the received notification message */ + if (!zapi_srv6_sid_notify_decode(zclient->ibuf, &ctx, &sid_addr, &sid_func, NULL, &note, + &loc_name)) { + zlog_err("%s : error in msg decode", __func__); + return -1; + } + + DEBUGD(&static_dbg_srv6, + "%s: received SRv6 SID notify: ctx %s sid_value %pI6 sid_func %u note %s", __func__, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx), &sid_addr, sid_func, + zapi_srv6_sid_notify2str(note)); + + /* Handle notification */ + switch (note) { + case ZAPI_SRV6_SID_ALLOCATED: + + DEBUGD(&static_dbg_srv6, "%s: SRv6 SID %pI6 %s ALLOCATED", __func__, &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); + + for (ALL_LIST_ELEMENTS_RO(srv6_sids, node, sid)) { + if (IPV6_ADDR_SAME(&sid->addr.prefix, &sid_addr)) { + found = true; + break; + } + } + + if (!found || !sid) { + zlog_err("SRv6 SID %pI6 %s: not found", &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); + return 0; + } + + SET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_VALID); + + /* + * Install the new SRv6 End SID in the forwarding plane through + * Zebra + */ + static_zebra_srv6_sid_install(sid); + + SET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_SENT_TO_ZEBRA); + + break; + case ZAPI_SRV6_SID_RELEASED: + + DEBUGD(&static_dbg_srv6, "%s: SRv6 SID %pI6 %s: RELEASED", __func__, &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); + + for (ALL_LIST_ELEMENTS_RO(srv6_sids, node, sid)) { + if (IPV6_ADDR_SAME(&sid->addr.prefix, &sid_addr)) { + found = true; + break; + } + } + + if (!found || !sid) { + zlog_err("SRv6 SID %pI6 %s: not found", &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); + return 0; + } + + UNSET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_VALID); + + break; + case ZAPI_SRV6_SID_FAIL_ALLOC: + zlog_err("SRv6 SID
%pI6 %s: Failed to allocate", &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); + + /* Error will be logged by zebra module */ + break; + case ZAPI_SRV6_SID_FAIL_RELEASE: + zlog_err("%s: SRv6 SID %pI6 %s failure to release", __func__, &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); + + /* Error will be logged by zebra module */ + break; + } + + return 0; +} + static zclient_handler *const static_handlers[] = { [ZEBRA_INTERFACE_ADDRESS_ADD] = interface_address_add, [ZEBRA_INTERFACE_ADDRESS_DELETE] = interface_address_delete, [ZEBRA_ROUTE_NOTIFY_OWNER] = route_notify_owner, + [ZEBRA_SRV6_LOCATOR_ADD] = static_zebra_process_srv6_locator_add, + [ZEBRA_SRV6_LOCATOR_DELETE] = static_zebra_process_srv6_locator_delete, + [ZEBRA_SRV6_SID_NOTIFY] = static_zebra_srv6_sid_notify, }; void static_zebra_init(void) diff --git a/staticd/static_zebra.h b/staticd/static_zebra.h index c4f4ebdcbc..2a94c6dad9 100644 --- a/staticd/static_zebra.h +++ b/staticd/static_zebra.h @@ -7,6 +7,8 @@ #ifndef __STATIC_ZEBRA_H__ #define __STATIC_ZEBRA_H__ +#include "static_srv6.h" + #ifdef __cplusplus extern "C" { #endif @@ -22,6 +24,14 @@ extern void static_zebra_stop(void); extern void static_zebra_vrf_register(struct vrf *vrf); extern void static_zebra_vrf_unregister(struct vrf *vrf); +extern int static_zebra_srv6_manager_get_locator(const char *name); + +extern void static_zebra_request_srv6_sid(struct static_srv6_sid *sid); +extern void static_zebra_release_srv6_sid(struct static_srv6_sid *sid); + +extern void static_zebra_srv6_sid_install(struct static_srv6_sid *sid); +extern void static_zebra_srv6_sid_uninstall(struct static_srv6_sid *sid); + #ifdef __cplusplus } #endif diff --git a/staticd/subdir.am b/staticd/subdir.am index 07ebe3c02c..bdbacbdd68 100644 --- a/staticd/subdir.am +++ b/staticd/subdir.am @@ -19,6 +19,7 @@ staticd_libstatic_a_SOURCES = \ staticd/static_vty.c \ staticd/static_nb.c \ staticd/static_nb_config.c \ + staticd/static_srv6.c \ # end noinst_HEADERS += \ @@ -29,6 +30,7 @@ noinst_HEADERS += \ staticd/static_vty.h \ staticd/static_vrf.h \ staticd/static_nb.h \ + staticd/static_srv6.h \ # end clippy_scan += \ diff --git a/tests/lib/northbound/test_oper_data.c b/tests/lib/northbound/test_oper_data.c index 3d700d8a19..0b334c6522 100644 --- a/tests/lib/northbound/test_oper_data.c +++ b/tests/lib/northbound/test_oper_data.c @@ -120,6 +120,26 @@ static const void *frr_test_module_vrfs_vrf_interfaces_interface_get_next( } /* + * XPath: /frr-test-module:frr-test-module/vrfs/vrf/interfaces/interface-new + */ +static enum nb_error frr_test_module_vrfs_vrf_interfaces_interface_new_get( + const struct nb_node *nb_node, const void *parent_list_entry, struct lyd_node *parent) +{ + const struct lysc_node *snode = nb_node->snode; + const struct tvrf *vrf; + struct listnode *node; + const char *interface; + LY_ERR err; + + vrf = listgetdata((struct listnode *)parent_list_entry); + for (ALL_LIST_ELEMENTS_RO(vrf->interfaces, node, interface)) { + err = lyd_new_term(parent, snode->module, snode->name, interface, false, NULL); + assert(err == LY_SUCCESS); + } + return NB_OK; +} + +/* * XPath: /frr-test-module:frr-test-module/vrfs/vrf/routes/route */ static const void * @@ -228,10 +248,19 @@ frr_test_module_c1value_get_elem(struct nb_cb_get_elem_args *args) /* * XPath: /frr-test-module:frr-test-module/c2cont/c2value */ -static struct yang_data * -frr_test_module_c2cont_c2value_get_elem(struct nb_cb_get_elem_args *args) +static enum nb_error frr_test_module_c2cont_c2value_get(const struct nb_node *nb_node, + 
const void *parent_list_entry, + struct lyd_node *parent) { - return yang_data_new_uint32(args->xpath, 0xAB010203); + const struct lysc_node *snode = nb_node->snode; + uint32_t value = 0xAB010203; + LY_ERR err; + + err = lyd_new_term_bin(parent, snode->module, snode->name, &value, sizeof(value), + LYD_NEW_PATH_UPDATE, NULL); + assert(err == LY_SUCCESS); + + return NB_OK; } /* clang-format off */ @@ -254,6 +283,10 @@ const struct frr_yang_module_info frr_test_module_info = { .cbs.get_next = frr_test_module_vrfs_vrf_interfaces_interface_get_next, }, { + .xpath = "/frr-test-module:frr-test-module/vrfs/vrf/interfaces/interface-new", + .cbs.get = frr_test_module_vrfs_vrf_interfaces_interface_new_get, + }, + { .xpath = "/frr-test-module:frr-test-module/vrfs/vrf/routes/route", .cbs.get_next = frr_test_module_vrfs_vrf_routes_route_get_next, }, @@ -287,7 +320,7 @@ const struct frr_yang_module_info frr_test_module_info = { }, { .xpath = "/frr-test-module:frr-test-module/c2cont/c2value", - .cbs.get_elem = frr_test_module_c2cont_c2value_get_elem, + .cbs.get = frr_test_module_c2cont_c2value_get, }, { .xpath = NULL, diff --git a/tests/lib/northbound/test_oper_data.in b/tests/lib/northbound/test_oper_data.in index f7c44cad31..0053148953 100644 --- a/tests/lib/northbound/test_oper_data.in +++ b/tests/lib/northbound/test_oper_data.in @@ -1,2 +1,5 @@ show yang operational-data /frr-test-module:frr-test-module +show yang operational-data /frr-test-module:frr-test-module/vrfs/vrf[name='vrf0']/routes/route[2] +show yang operational-data /frr-test-module:frr-test-module/vrfs/vrf[name='vrf0']/routes/route[3]/interface +show yang operational-data /frr-test-module:frr-test-module/vrfs/vrf[name='vrf0']/routes/route[10] test rpc diff --git a/tests/lib/northbound/test_oper_data.refout b/tests/lib/northbound/test_oper_data.refout index 7c56564143..2536e0306b 100644 --- a/tests/lib/northbound/test_oper_data.refout +++ b/tests/lib/northbound/test_oper_data.refout @@ -11,6 +11,12 @@ test# show yang operational-data /frr-test-module:frr-test-module "eth1",
"eth2",
"eth3"
+ ],
+ "interface-new": [
+ "eth0",
+ "eth1",
+ "eth2",
+ "eth3"
]
},
"routes": {
@@ -65,6 +71,12 @@ test# show yang operational-data /frr-test-module:frr-test-module
"eth1",
"eth2",
"eth3"
+ ],
+ "interface-new": [
+ "eth0",
+ "eth1",
+ "eth2",
+ "eth3"
]
},
"routes": {
@@ -119,6 +131,49 @@ test# show yang operational-data /frr-test-module:frr-test-module
}
}
}
+test# show yang operational-data /frr-test-module:frr-test-module/vrfs/vrf[name='vrf0']/routes/route[2]
+{
+ "frr-test-module:frr-test-module": {
+ "vrfs": {
+ "vrf": [
+ {
+ "name": "vrf0",
+ "routes": {
+ "route": [
+ {
+ "prefix": "10.0.0.1/32",
+ "next-hop": "172.16.0.1",
+ "interface": "eth1",
+ "metric": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ }
+}
+test# show yang operational-data /frr-test-module:frr-test-module/vrfs/vrf[name='vrf0']/routes/route[3]/interface
+{
+ "frr-test-module:frr-test-module": {
+ "vrfs": {
+ "vrf": [
+ {
+ "name": "vrf0",
+ "routes": {
+ "route": [
+ {
+ "interface": "eth2"
+ }
+ ]
+ }
+ }
+ ]
+ }
+ }
+}
+test# show yang operational-data /frr-test-module:frr-test-module/vrfs/vrf[name='vrf0']/routes/route[10]
+{}
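The three queries added above exercise positional predicates (route[2], route[3]/interface, route[10]) against the reworked oper-data callbacks. Purely as an illustration, not part of this change: the same kind of check could be written with the router_json_cmp/run_and_expect helpers used by the topotests later in this series. The router handle "r1" and the surrounding topotest scaffolding are assumptions, and frr-test-module is normally only loaded by the unit-test binary, so treat this as a sketch of the polling idiom rather than a real test.

# Hypothetical sketch: poll a single list entry selected by a positional
# predicate, reusing the json_cmp/run_and_expect idiom from the topotests
# in this series.  "r1" is an assumed gear name.
import functools

from lib import topotest
from lib.topogen import get_topogen


def check_vrf0_route_2():
    tgen = get_topogen()
    # Expected subset of the JSON output, matching the refout above.
    expected = {
        "frr-test-module:frr-test-module": {
            "vrfs": {
                "vrf": [
                    {
                        "name": "vrf0",
                        "routes": {"route": [{"prefix": "10.0.0.1/32"}]},
                    }
                ]
            }
        }
    }
    test_func = functools.partial(
        topotest.router_json_cmp,
        tgen.gears["r1"],
        "show yang operational-data "
        "/frr-test-module:frr-test-module/vrfs/vrf[name='vrf0']/routes/route[2]",
        expected,
    )
    # run_and_expect() retries until json_cmp() reports no mismatch (None).
    _, result = topotest.run_and_expect(test_func, None, count=10, wait=1)
    assert result is None, "route[2] operational data did not match"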
test# test rpc vrf testname data testdata test# diff --git a/tests/topotests/bfd_vrf_topo1/test_bfd_vrf_topo1.py b/tests/topotests/bfd_vrf_topo1/test_bfd_vrf_topo1.py index f6adff61d0..f00af34e39 100644 --- a/tests/topotests/bfd_vrf_topo1/test_bfd_vrf_topo1.py +++ b/tests/topotests/bfd_vrf_topo1/test_bfd_vrf_topo1.py @@ -84,11 +84,9 @@ def setup_module(mod): router.net.set_intf_netns(rname + "-eth2", ns, up=True) for rname, router in router_list.items(): - router.load_config(TopoRouter.RD_MGMTD, None, "--vrfwnetns") + router.use_netns_vrf() router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, "{}/zebra.conf".format(rname)), - "--vrfwnetns", + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname)) diff --git a/tests/topotests/bgp_aggregate_address_topo1/r1/bgp_192_168_0_1.json b/tests/topotests/bgp_aggregate_address_topo1/r1/bgp_192_168_0_1.json new file mode 100644 index 0000000000..8c0da8dc92 --- /dev/null +++ b/tests/topotests/bgp_aggregate_address_topo1/r1/bgp_192_168_0_1.json @@ -0,0 +1,41 @@ +{ + "prefix":"192.168.0.1/32", + "paths":[ + { + "aspath":{ + "string":"65001", + "segments":[ + { + "type":"as-sequence", + "list":[ + 65001 + ] + } + ], + "length":1 + }, + "suppressed":true, + "origin":"IGP", + "metric":10, + "valid":true, + "bestpath":{ + "overall":true, + "selectionReason":"First path received" + }, + "nexthops":[ + { + "ip":"10.0.0.2", + "afi":"ipv4", + "metric":0, + "accessible":true, + "used":true + } + ], + "peer":{ + "peerId":"10.0.0.2", + "routerId":"10.254.254.3", + "type":"external" + } + } + ] +} diff --git a/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py b/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py index 370d01e525..a0a1027c98 100644 --- a/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py +++ b/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py @@ -13,6 +13,7 @@ Test BGP aggregate address features. """ +import json import os import sys import pytest @@ -265,6 +266,24 @@ match ip address acl-sup-three ) +def test_check_bgp_attribute(): + "Dump the suppressed attribute of the 192.168.0.1/32 prefix in r1." + tgen = get_topogen() + + logger.info("Test that the BGP path to 192.168.0.1 is as expected.") + expected = json.loads(open("{}/r1/bgp_192_168_0_1.json".format(CWD)).read()) + + test_func = functools.partial( + topotest.router_json_cmp, + tgen.gears["r1"], + "show bgp ipv4 192.168.0.1/32 json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=20, wait=1) + assertmsg = '"r1" BGP 192.168.0.1 route output failed' + assert result is None, assertmsg + + def test_memory_leak(): "Run the memory leak test and report results." 
tgen = get_topogen() diff --git a/tests/topotests/bgp_bfd_session/r1/frr.conf b/tests/topotests/bgp_bfd_session/r1/frr.conf index a1560b09fa..cea1ff8147 100644 --- a/tests/topotests/bgp_bfd_session/r1/frr.conf +++ b/tests/topotests/bgp_bfd_session/r1/frr.conf @@ -11,4 +11,16 @@ router bgp 65000 neighbor 192.168.1.3 bfd neighbor 192.168.1.3 ebgp-multihop 20 neighbor 192.168.1.3 update-source r1-eth0 + neighbor PG peer-group + neighbor PG remote-as auto + neighbor PG bfd + neighbor PG ebgp-multihop 15 + neighbor PG update-source 10.0.0.1 + neighbor 192.168.1.4 peer-group PG + neighbor PG2 peer-group + neighbor PG2 remote-as auto + neighbor PG2 bfd + neighbor PG2 ebgp-multihop 25 + neighbor 192.168.1.5 peer-group PG2 + neighbor 192.168.1.5 update-source 10.0.0.1 exit diff --git a/tests/topotests/bgp_bfd_session/test_bgp_bfd_session.py b/tests/topotests/bgp_bfd_session/test_bgp_bfd_session.py index adf557af7b..0dbb2f089b 100644 --- a/tests/topotests/bgp_bfd_session/test_bgp_bfd_session.py +++ b/tests/topotests/bgp_bfd_session/test_bgp_bfd_session.py @@ -85,7 +85,29 @@ def test_bgp_bfd_session(): "diagnostic": "ok", "remote-diagnostic": "ok", "type": "dynamic", - } + }, + { + "multihop": True, + "peer": "192.168.1.4", + "local": "10.0.0.1", + "vrf": "default", + "minimum-ttl": 241, + "status": "down", + "diagnostic": "ok", + "remote-diagnostic": "ok", + "type": "dynamic", + }, + { + "multihop": True, + "peer": "192.168.1.5", + "local": "10.0.0.1", + "vrf": "default", + "minimum-ttl": 231, + "status": "down", + "diagnostic": "ok", + "remote-diagnostic": "ok", + "type": "dynamic", + }, ] return topotest.json_cmp(output, expected) diff --git a/tests/topotests/bgp_bmp/bgpbmp.py b/tests/topotests/bgp_bmp/bgpbmp.py index eac78a63f7..acbc405aa4 100644 --- a/tests/topotests/bgp_bmp/bgpbmp.py +++ b/tests/topotests/bgp_bmp/bgpbmp.py @@ -187,12 +187,19 @@ def bmp_check_for_prefixes( def bmp_check_for_peer_message( - expected_peers, bmp_log_type, bmp_collector, bmp_log_file, is_rd_instance=False + expected_peers, + bmp_log_type, + bmp_collector, + bmp_log_file, + is_rd_instance=False, + peer_bgp_id=None, + peer_distinguisher=None, ): """ Check for the presence of a peer up message for the peer """ global SEQ + last_seq = SEQ # we care only about the new messages messages = [ @@ -208,6 +215,10 @@ def bmp_check_for_peer_message( for m in messages: if is_rd_instance and m["peer_distinguisher"] == "0:0": continue + if peer_distinguisher and m["peer_distinguisher"] != peer_distinguisher: + continue + if peer_bgp_id and m["peer_bgp_id"] != peer_bgp_id: + continue if ( "peer_ip" in m.keys() and m["peer_ip"] != "0.0.0.0" @@ -215,16 +226,23 @@ def bmp_check_for_peer_message( ): if is_rd_instance and m["peer_type"] != "route distinguisher instance": continue - peers.append(m["peer_ip"]) + peers.append((m["peer_ip"], m["seq"])) elif m["policy"] == "loc-rib" and m["bmp_log_type"] == bmp_log_type: - peers.append("0.0.0.0") + peers.append(("0.0.0.0", m["seq"])) # check for prefixes for ep in expected_peers: - if ep not in peers: + for _ip, _seq in peers: + if ep == _ip: + msg = "The peer {} is present in the {} log messages." + logger.debug(msg.format(ep, bmp_log_type)) + if _seq > last_seq: + last_seq = _seq + break + else: msg = "The peer {} is not present in the {} log messages." 
logger.debug(msg.format(ep, bmp_log_type)) return False - SEQ = messages[-1]["seq"] + SEQ = last_seq return True diff --git a/tests/topotests/bgp_bmp/bmp1import/bmp-update-loc-rib-step1.json b/tests/topotests/bgp_bmp/bmp1import/bmp-update-loc-rib-step1.json new file mode 100644 index 0000000000..3542f4e495 --- /dev/null +++ b/tests/topotests/bgp_bmp/bmp1import/bmp-update-loc-rib-step1.json @@ -0,0 +1,34 @@ +{ + "loc-rib": { + "update": { + "172.31.0.77/32": { + "as_path": "", + "bgp_nexthop": "192.168.1.3", + "bmp_log_type": "update", + "ip_prefix": "172.31.0.77/32", + "is_filtered": false, + "origin": "IGP", + "peer_asn": 65501, + "peer_bgp_id": "192.168.0.1", + "peer_distinguisher": "444:1", + "peer_type": "loc-rib instance", + "policy": "loc-rib" + }, + "2001::1125/128": { + "afi": 2, + "as_path": "", + "bmp_log_type": "update", + "ip_prefix": "2001::1125/128", + "is_filtered": false, + "nxhp_ip": "192:167::3", + "origin": "IGP", + "peer_asn": 65501, + "peer_bgp_id": "192.168.0.1", + "peer_distinguisher": "555:1", + "peer_type": "loc-rib instance", + "policy": "loc-rib", + "safi": 1 + } + } + } +} diff --git a/tests/topotests/bgp_bmp/bmp1import/bmp-update-loc-rib-step2.json b/tests/topotests/bgp_bmp/bmp1import/bmp-update-loc-rib-step2.json new file mode 100644 index 0000000000..60066d502c --- /dev/null +++ b/tests/topotests/bgp_bmp/bmp1import/bmp-update-loc-rib-step2.json @@ -0,0 +1,34 @@ +{ + "loc-rib": { + "update": { + "172.31.0.77/32": { + "as_path": "", + "bgp_nexthop": "192.168.1.3", + "bmp_log_type": "update", + "ip_prefix": "172.31.0.77/32", + "is_filtered": false, + "origin": "IGP", + "peer_asn": 65501, + "peer_bgp_id": "192.168.0.1", + "peer_distinguisher": "666:22", + "peer_type": "loc-rib instance", + "policy": "loc-rib" + }, + "2001::1125/128": { + "afi": 2, + "as_path": "", + "bmp_log_type": "update", + "ip_prefix": "2001::1125/128", + "is_filtered": false, + "nxhp_ip": "192:167::3", + "origin": "IGP", + "peer_asn": 65501, + "peer_bgp_id": "192.168.0.1", + "peer_distinguisher": "666:22", + "peer_type": "loc-rib instance", + "policy": "loc-rib", + "safi": 1 + } + } + } +} diff --git a/tests/topotests/bgp_bmp/bmp1import/bmp-update-post-policy-step1.json b/tests/topotests/bgp_bmp/bmp1import/bmp-update-post-policy-step1.json new file mode 100644 index 0000000000..cf71f20485 --- /dev/null +++ b/tests/topotests/bgp_bmp/bmp1import/bmp-update-post-policy-step1.json @@ -0,0 +1,36 @@ +{ + "post-policy": { + "update": { + "172.31.0.77/32": { + "as_path": "", + "bgp_nexthop": "192.168.1.3", + "bmp_log_type": "update", + "ip_prefix": "172.31.0.77/32", + "ipv6": false, + "origin": "IGP", + "peer_asn": 65501, + "peer_bgp_id": "192.168.1.3", + "peer_distinguisher": "444:1", + "peer_ip": "192.168.1.3", + "peer_type": "route distinguisher instance", + "policy": "post-policy" + }, + "2001::1125/128": { + "afi": 2, + "as_path": "", + "bmp_log_type": "update", + "ip_prefix": "2001::1125/128", + "ipv6": true, + "nxhp_ip": "192:167::3", + "origin": "IGP", + "peer_asn": 65501, + "peer_bgp_id": "192.168.1.3", + "peer_distinguisher": "555:1", + "peer_ip": "192:167::3", + "peer_type": "route distinguisher instance", + "policy": "post-policy", + "safi": 1 + } + } + } +} diff --git a/tests/topotests/bgp_bmp/bmp1import/bmp-update-post-policy-step2.json b/tests/topotests/bgp_bmp/bmp1import/bmp-update-post-policy-step2.json new file mode 100644 index 0000000000..b555c2a371 --- /dev/null +++ b/tests/topotests/bgp_bmp/bmp1import/bmp-update-post-policy-step2.json @@ -0,0 +1,36 @@ +{ + "post-policy": { + 
"update": { + "172.31.0.77/32": { + "as_path": "", + "bgp_nexthop": "192.168.1.3", + "bmp_log_type": "update", + "ip_prefix": "172.31.0.77/32", + "ipv6": false, + "origin": "IGP", + "peer_asn": 65501, + "peer_bgp_id": "192.168.1.3", + "peer_distinguisher": "666:22", + "peer_ip": "192.168.1.3", + "peer_type": "route distinguisher instance", + "policy": "post-policy" + }, + "2001::1125/128": { + "afi": 2, + "as_path": "", + "bmp_log_type": "update", + "ip_prefix": "2001::1125/128", + "ipv6": true, + "nxhp_ip": "192:167::3", + "origin": "IGP", + "peer_asn": 65501, + "peer_bgp_id": "192.168.1.3", + "peer_distinguisher": "666:22", + "peer_ip": "192:167::3", + "peer_type": "route distinguisher instance", + "policy": "post-policy", + "safi": 1 + } + } + } +} diff --git a/tests/topotests/bgp_bmp/bmp1import/bmp-update-pre-policy-step1.json b/tests/topotests/bgp_bmp/bmp1import/bmp-update-pre-policy-step1.json new file mode 100644 index 0000000000..43273cc93a --- /dev/null +++ b/tests/topotests/bgp_bmp/bmp1import/bmp-update-pre-policy-step1.json @@ -0,0 +1,36 @@ +{ + "pre-policy": { + "update": { + "172.31.0.77/32": { + "as_path": "", + "bgp_nexthop": "192.168.1.3", + "bmp_log_type": "update", + "ip_prefix": "172.31.0.77/32", + "ipv6": false, + "origin": "IGP", + "peer_asn": 65501, + "peer_bgp_id": "192.168.1.3", + "peer_distinguisher": "444:1", + "peer_ip": "192.168.1.3", + "peer_type": "route distinguisher instance", + "policy": "pre-policy" + }, + "2001::1125/128": { + "afi": 2, + "as_path": "", + "bmp_log_type": "update", + "ip_prefix": "2001::1125/128", + "ipv6": true, + "nxhp_ip": "192:167::3", + "origin": "IGP", + "peer_asn": 65501, + "peer_bgp_id": "192.168.1.3", + "peer_distinguisher": "555:1", + "peer_ip": "192:167::3", + "peer_type": "route distinguisher instance", + "policy": "pre-policy", + "safi": 1 + } + } + } +} diff --git a/tests/topotests/bgp_bmp/bmp1import/bmp-update-pre-policy-step2.json b/tests/topotests/bgp_bmp/bmp1import/bmp-update-pre-policy-step2.json new file mode 100644 index 0000000000..20549926d5 --- /dev/null +++ b/tests/topotests/bgp_bmp/bmp1import/bmp-update-pre-policy-step2.json @@ -0,0 +1,36 @@ +{ + "pre-policy": { + "update": { + "172.31.0.77/32": { + "as_path": "", + "bgp_nexthop": "192.168.1.3", + "bmp_log_type": "update", + "ip_prefix": "172.31.0.77/32", + "ipv6": false, + "origin": "IGP", + "peer_asn": 65501, + "peer_bgp_id": "192.168.1.3", + "peer_distinguisher": "666:22", + "peer_ip": "192.168.1.3", + "peer_type": "route distinguisher instance", + "policy": "pre-policy" + }, + "2001::1125/128": { + "afi": 2, + "as_path": "", + "bmp_log_type": "update", + "ip_prefix": "2001::1125/128", + "ipv6": true, + "nxhp_ip": "192:167::3", + "origin": "IGP", + "peer_asn": 65501, + "peer_bgp_id": "192.168.1.3", + "peer_distinguisher": "666:22", + "peer_ip": "192:167::3", + "peer_type": "route distinguisher instance", + "policy": "pre-policy", + "safi": 1 + } + } + } +} diff --git a/tests/topotests/bgp_bmp/bmp1import/bmp-withdraw-loc-rib-step1.json b/tests/topotests/bgp_bmp/bmp1import/bmp-withdraw-loc-rib-step1.json new file mode 100644 index 0000000000..fcf518390d --- /dev/null +++ b/tests/topotests/bgp_bmp/bmp1import/bmp-withdraw-loc-rib-step1.json @@ -0,0 +1,28 @@ +{ + "loc-rib": { + "withdraw": { + "172.31.0.77/32": { + "bmp_log_type": "withdraw", + "ip_prefix": "172.31.0.77/32", + "is_filtered": false, + "peer_asn": 65501, + "peer_bgp_id": "192.168.0.1", + "peer_distinguisher": "444:1", + "peer_type": "loc-rib instance", + "policy": "loc-rib" + }, + "2001::1125/128": { 
+ "afi": 2, + "bmp_log_type": "withdraw", + "ip_prefix": "2001::1125/128", + "is_filtered": false, + "peer_asn": 65501, + "peer_bgp_id": "192.168.0.1", + "peer_distinguisher": "555:1", + "peer_type": "loc-rib instance", + "policy": "loc-rib", + "safi": 1 + } + } + } +} diff --git a/tests/topotests/bgp_bmp/bmp1import/bmp-withdraw-loc-rib-step2.json b/tests/topotests/bgp_bmp/bmp1import/bmp-withdraw-loc-rib-step2.json new file mode 100644 index 0000000000..1e5040ba60 --- /dev/null +++ b/tests/topotests/bgp_bmp/bmp1import/bmp-withdraw-loc-rib-step2.json @@ -0,0 +1,34 @@ +{ + "loc-rib": { + "withdraw": { + "172.31.0.15/32": { + "afi": 1, + "bmp_log_type": "withdraw", + "ip_prefix": "172.31.0.15/32", + "is_filtered": false, + "label": 0, + "peer_asn": 65501, + "peer_bgp_id": "192.168.0.1", + "peer_distinguisher": "0:0", + "peer_type": "loc-rib instance", + "policy": "loc-rib", + "rd": "444:2", + "safi": 128 + }, + "2001::1111/128": { + "afi": 2, + "bmp_log_type": "withdraw", + "ip_prefix": "2001::1111/128", + "is_filtered": false, + "label": 0, + "peer_asn": 65501, + "peer_bgp_id": "192.168.0.1", + "peer_distinguisher": "0:0", + "peer_type": "loc-rib instance", + "policy": "loc-rib", + "rd": "555:2", + "safi": 128 + } + } + } +} diff --git a/tests/topotests/bgp_bmp/bmp1import/bmp-withdraw-post-policy-step1.json b/tests/topotests/bgp_bmp/bmp1import/bmp-withdraw-post-policy-step1.json new file mode 100644 index 0000000000..6626e91361 --- /dev/null +++ b/tests/topotests/bgp_bmp/bmp1import/bmp-withdraw-post-policy-step1.json @@ -0,0 +1,30 @@ +{ + "post-policy": { + "withdraw": { + "172.31.0.77/32": { + "bmp_log_type": "withdraw", + "ip_prefix": "172.31.0.77/32", + "ipv6": false, + "peer_asn": 65501, + "peer_bgp_id": "192.168.1.3", + "peer_distinguisher": "444:1", + "peer_ip": "192.168.1.3", + "peer_type": "route distinguisher instance", + "policy": "post-policy" + }, + "2001::1125/128": { + "afi": 2, + "bmp_log_type": "withdraw", + "ip_prefix": "2001::1125/128", + "ipv6": true, + "peer_asn": 65501, + "peer_bgp_id": "192.168.1.3", + "peer_distinguisher": "555:1", + "peer_ip": "192:167::3", + "peer_type": "route distinguisher instance", + "policy": "post-policy", + "safi": 1 + } + } + } +} diff --git a/tests/topotests/bgp_bmp/bmp1import/bmp-withdraw-pre-policy-step1.json b/tests/topotests/bgp_bmp/bmp1import/bmp-withdraw-pre-policy-step1.json new file mode 100644 index 0000000000..d3fb1b7ba1 --- /dev/null +++ b/tests/topotests/bgp_bmp/bmp1import/bmp-withdraw-pre-policy-step1.json @@ -0,0 +1,30 @@ +{ + "pre-policy": { + "withdraw": { + "172.31.0.77/32": { + "bmp_log_type": "withdraw", + "ip_prefix": "172.31.0.77/32", + "ipv6": false, + "peer_asn": 65501, + "peer_bgp_id": "192.168.1.3", + "peer_distinguisher": "444:1", + "peer_ip": "192.168.1.3", + "peer_type": "route distinguisher instance", + "policy": "pre-policy" + }, + "2001::1125/128": { + "afi": 2, + "bmp_log_type": "withdraw", + "ip_prefix": "2001::1125/128", + "ipv6": true, + "peer_asn": 65501, + "peer_bgp_id": "192.168.1.3", + "peer_distinguisher": "555:1", + "peer_ip": "192:167::3", + "peer_type": "route distinguisher instance", + "policy": "pre-policy", + "safi": 1 + } + } + } +} diff --git a/tests/topotests/bgp_bmp/r1import/frr.conf b/tests/topotests/bgp_bmp/r1import/frr.conf new file mode 100644 index 0000000000..bec4eb01c7 --- /dev/null +++ b/tests/topotests/bgp_bmp/r1import/frr.conf @@ -0,0 +1,73 @@ +interface r1import-eth0 + ip address 192.0.2.1/24 +! +interface r1import-eth1 + ip address 192.168.0.1/24 + ipv6 address 192:168::1/64 +! 
+interface r1import-eth2 + ip address 192.168.1.1/24 + ipv6 address 192:167::1/64 +! +router bgp 65501 + bgp router-id 192.168.0.1 + bgp log-neighbor-changes + no bgp ebgp-requires-policy + neighbor 192.168.0.2 remote-as 65502 + neighbor 192:168::2 remote-as 65502 +! + bmp targets bmp1 + bmp connect 192.0.2.10 port 1789 min-retry 100 max-retry 10000 + bmp monitor ipv4 unicast pre-policy + bmp monitor ipv6 unicast pre-policy + bmp monitor ipv4 unicast post-policy + bmp monitor ipv6 unicast post-policy + bmp monitor ipv4 unicast loc-rib + bmp monitor ipv6 unicast loc-rib + bmp import-vrf-view vrf1 + exit +! + address-family ipv4 vpn + neighbor 192.168.0.2 activate + neighbor 192.168.0.2 soft-reconfiguration inbound + exit-address-family + address-family ipv6 vpn + neighbor 192:168::2 activate + neighbor 192:168::2 soft-reconfiguration inbound + exit-address-family + address-family ipv4 unicast + neighbor 192.168.0.2 activate + neighbor 192.168.0.2 soft-reconfiguration inbound + no neighbor 192:168::2 activate + exit-address-family +! + address-family ipv6 unicast + neighbor 192:168::2 activate + neighbor 192:168::2 soft-reconfiguration inbound + exit-address-family +! +router bgp 65501 vrf vrf1 + bgp router-id 192.168.0.1 + bgp log-neighbor-changes + neighbor 192.168.1.3 remote-as 65501 + neighbor 192:167::3 remote-as 65501 + address-family ipv4 unicast + neighbor 192.168.1.3 activate + neighbor 192.168.1.3 soft-reconfiguration inbound + no neighbor 192:167::3 activate + label vpn export 101 + rd vpn export 444:1 + rt vpn both 52:100 + export vpn + import vpn + exit-address-family + address-family ipv6 unicast + neighbor 192:167::3 activate + neighbor 192:167::3 soft-reconfiguration inbound + label vpn export 103 + rd vpn export 555:1 + rt vpn both 54:200 + export vpn + import vpn + exit-address-family +exit diff --git a/tests/topotests/bgp_bmp/r1import/show-bgp-vrf1-ipv4-update-step1.json b/tests/topotests/bgp_bmp/r1import/show-bgp-vrf1-ipv4-update-step1.json new file mode 100644 index 0000000000..c21a586c3b --- /dev/null +++ b/tests/topotests/bgp_bmp/r1import/show-bgp-vrf1-ipv4-update-step1.json @@ -0,0 +1,21 @@ +{ + "routes": { + "172.31.0.77/32": [ + { + "bestpath": true, + "pathFrom": "internal", + "path": "", + "origin": "IGP", + "nexthops": [ + { + "ip": "192.168.1.3", + "hostname": "r3", + "afi": "ipv4", + "used": true + } + ] + } + ] + } +} + diff --git a/tests/topotests/bgp_bmp/r1import/show-bgp-vrf1-ipv4-withdraw-step1.json b/tests/topotests/bgp_bmp/r1import/show-bgp-vrf1-ipv4-withdraw-step1.json new file mode 100644 index 0000000000..154bef7995 --- /dev/null +++ b/tests/topotests/bgp_bmp/r1import/show-bgp-vrf1-ipv4-withdraw-step1.json @@ -0,0 +1,6 @@ +{ + "routes": { + "172.31.0.77/32": null + } +} + diff --git a/tests/topotests/bgp_bmp/r1import/show-bgp-vrf1-ipv6-update-step1.json b/tests/topotests/bgp_bmp/r1import/show-bgp-vrf1-ipv6-update-step1.json new file mode 100644 index 0000000000..14df5ec931 --- /dev/null +++ b/tests/topotests/bgp_bmp/r1import/show-bgp-vrf1-ipv6-update-step1.json @@ -0,0 +1,27 @@ +{ + "routes": { + "2001::1125/128": [ + { + "bestpath": true, + "pathFrom": "internal", + "path": "", + "origin": "IGP", + "nexthops": [ + { + "ip": "192:167::3", + "hostname": "r3", + "afi": "ipv6", + "scope": "global" + }, + { + "hostname": "r3", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ] + } +} + diff --git a/tests/topotests/bgp_bmp/r1import/show-bgp-vrf1-ipv6-withdraw-step1.json 
b/tests/topotests/bgp_bmp/r1import/show-bgp-vrf1-ipv6-withdraw-step1.json new file mode 100644 index 0000000000..7c7a95e33e --- /dev/null +++ b/tests/topotests/bgp_bmp/r1import/show-bgp-vrf1-ipv6-withdraw-step1.json @@ -0,0 +1,6 @@ +{ + "routes": { + "2001::1125/128": null + } +} + diff --git a/tests/topotests/bgp_bmp/r3/frr.conf b/tests/topotests/bgp_bmp/r3/frr.conf new file mode 100644 index 0000000000..145e156b11 --- /dev/null +++ b/tests/topotests/bgp_bmp/r3/frr.conf @@ -0,0 +1,18 @@ +interface r3-eth0 + ip address 192.168.1.3/24 + ipv6 address 192:167::3/64 +! +router bgp 65501 + bgp router-id 192.168.1.3 + bgp log-neighbor-changes + no bgp network import-check + neighbor 192.168.1.1 remote-as 65501 + neighbor 192:167::1 remote-as 65501 + address-family ipv4 unicast + neighbor 192.168.1.1 activate + no neighbor 192:167::1 activate + exit-address-family + address-family ipv6 unicast + neighbor 192:167::1 activate + exit-address-family +exit diff --git a/tests/topotests/bgp_bmp/test_bgp_bmp_1.py b/tests/topotests/bgp_bmp/test_bgp_bmp_1.py index be3e07929a..1d7aa97473 100644 --- a/tests/topotests/bgp_bmp/test_bgp_bmp_1.py +++ b/tests/topotests/bgp_bmp/test_bgp_bmp_1.py @@ -78,7 +78,7 @@ def setup_module(mod): "tcpdump -nni r1-eth0 -s 0 -w {} &".format(pcap_file), stdout=None ) - for rname, router in tgen.routers().items(): + for _, (rname, router) in enumerate(tgen.routers().items(), 1): logger.info("Loading router %s" % rname) router.load_frr_config( os.path.join(CWD, "{}/frr.conf".format(rname)), diff --git a/tests/topotests/bgp_bmp/test_bgp_bmp_2.py b/tests/topotests/bgp_bmp/test_bgp_bmp_2.py index f16ff2b445..e0b9a0f607 100644 --- a/tests/topotests/bgp_bmp/test_bgp_bmp_2.py +++ b/tests/topotests/bgp_bmp/test_bgp_bmp_2.py @@ -252,6 +252,169 @@ def test_peer_down(): assert success, "Checking the updated prefixes has been failed !." +def test_bgp_instance_flapping(): + """ + Checking for BGP loc-rib up messages + """ + tgen = get_topogen() + + # create flapping at BMP + tgen.net["r1vrf"].cmd("ip link set dev vrf1 down") + + peers = ["0.0.0.0"] + logger.info("checking for BMP peer down LOC-RIB message.") + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer down", + tgen.gears["bmp1vrf"], + os.path.join(tgen.logdir, "bmp1vrf", "bmp.log"), + is_rd_instance=True, + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert success, "Checking the BMP peer down LOC-RIB message failed !." + + tgen.net["r1vrf"].cmd("ip link set dev vrf1 up") + + logger.info("checking for BMP peer up LOC-RIB message.") + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer up", + tgen.gears["bmp1vrf"], + os.path.join(tgen.logdir, "bmp1vrf", "bmp.log"), + is_rd_instance=True, + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert success, "Checking the BMP peer up LOC-RIB message failed !." + + +def test_bgp_routerid_changed(): + """ + Checking for BGP loc-rib up messages with new router-id + """ + tgen = get_topogen() + + tgen.gears["r1vrf"].vtysh_cmd( + """ + configure terminal + router bgp 65501 vrf vrf1 + bgp router-id 192.168.1.77 + """ + ) + + peers = ["0.0.0.0"] + + logger.info( + "checking for BMP peer down LOC-RIB message with router-id set to 192.168.0.1." 
+ ) + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer down", + tgen.gears["bmp1vrf"], + os.path.join(tgen.logdir, "bmp1vrf", "bmp.log"), + is_rd_instance=True, + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert ( + success + ), "Checking the BMP peer down LOC-RIB message with router-id set to 192.168.0.1 failed !." + + logger.info( + "checking for BMP peer up LOC-RIB message with router-id set to 192.168.1.77." + ) + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer up", + tgen.gears["bmp1vrf"], + os.path.join(tgen.logdir, "bmp1vrf", "bmp.log"), + is_rd_instance=True, + peer_bgp_id="192.168.1.77", + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert ( + success + ), "Checking the BMP peer up LOC-RIB message with router-id set to 192.168.1.77 failed !." + + +def test_reconfigure_route_distinguisher_vrf1(): + """ + Checking for BMP peers down messages + """ + tgen = get_topogen() + + bmp_update_seq( + tgen.gears["bmp1vrf"], os.path.join(tgen.logdir, "bmp1vrf", "bmp.log") + ) + peers = ["0.0.0.0"] + + tgen.gears["r1vrf"].vtysh_cmd( + """ + configure terminal + router bgp 65501 vrf vrf1 + address-family ipv4 unicast + rd vpn export 666:22 + exit-address-family + address-family ipv6 unicast + rd vpn export 666:22 + """ + ) + logger.info( + "checking for BMP peer down LOC-RIB message with route-distinguisher set to 444:1" + ) + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer down", + tgen.gears["bmp1vrf"], + os.path.join(tgen.logdir, "bmp1vrf", "bmp.log"), + is_rd_instance=True, + peer_distinguisher="444:1", + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert ( + success + ), "Checking the BMP peer down LOC-RIB message with route-distinguisher set to 444:1 failed !." + + logger.info( + "checking for BMP peer up LOC-RIB messages with route-distinguisher set to 666:22" + ) + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer up", + tgen.gears["bmp1vrf"], + os.path.join(tgen.logdir, "bmp1vrf", "bmp.log"), + is_rd_instance=True, + peer_bgp_id="192.168.1.77", + peer_distinguisher="666:22", + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert ( + success + ), "Checking the BMP peer up LOC-RIB message with route-distinguisher set to 666:22 failed !." + + logger.info( + "checking for BMP peer up messages with route-distinguisher set to 666:22" + ) + peers = ["192.168.0.2", "192:168::2"] + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer up", + tgen.gears["bmp1vrf"], + os.path.join(tgen.logdir, "bmp1vrf", "bmp.log"), + is_rd_instance=True, + peer_distinguisher="666:22", + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert ( + success + ), "Checking the BMP peer up messages with route-distinguisher set to 666:22 failed !." + + if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_bmp/test_bgp_bmp_3.py b/tests/topotests/bgp_bmp/test_bgp_bmp_3.py new file mode 100644 index 0000000000..212cf9e696 --- /dev/null +++ b/tests/topotests/bgp_bmp/test_bgp_bmp_3.py @@ -0,0 +1,567 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# Copyright 2024 6WIND S.A. 
+# + +""" +test_bgp_bmp.py_3: Test BGP BMP functionalities + + +------+ +------+ +------+ + | | | | | | + | BMP1 |------------| R1 |---------------| R2 | + | | | | | | + +------+ +--+---+ +------+ + | + +--+---+ + | | + | R3 | + | | + +------+ + +Setup two routers R1 and R2 with one link configured with IPv4 and +IPv6 addresses. +Configure BGP in R1 and R2 to exchange prefixes from +the latter to the first router. +Setup a link between R1 and the BMP server, activate the BMP feature in R1 +and ensure the monitored BGP sessions logs are well present on the BMP server. +""" + +from functools import partial +import json +import os +import pytest +import sys + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join("../")) +sys.path.append(os.path.join("../lib/")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.bgp import verify_bgp_convergence_from_running_config +from lib.bgp import bgp_configure_prefixes +from .bgpbmp import ( + bmp_check_for_prefixes, + bmp_check_for_peer_message, + bmp_update_seq, + bmp_reset_seq, +) +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +pytestmark = [pytest.mark.bgpd] + +PRE_POLICY = "pre-policy" +POST_POLICY = "post-policy" +LOC_RIB = "loc-rib" + +UPDATE_EXPECTED_JSON = False +DEBUG_PCAP = False + + +def build_topo(tgen): + tgen.add_router("r1import") + tgen.add_router("r2") + tgen.add_router("r3") # CPE behind r1 + + tgen.add_bmp_server("bmp1import", ip="192.0.2.10", defaultRoute="via 192.0.2.1") + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1import"]) + switch.add_link(tgen.gears["bmp1import"]) + + tgen.add_link(tgen.gears["r1import"], tgen.gears["r2"], "r1import-eth1", "r2-eth0") + tgen.add_link(tgen.gears["r1import"], tgen.gears["r3"], "r1import-eth2", "r3-eth0") + + +def setup_module(mod): + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + tgen.net["r1import"].cmd( + """ +ip link add vrf1 type vrf table 10 +ip link set vrf1 up +ip link set r1import-eth2 master vrf1 + """ + ) + + bmp_reset_seq() + if DEBUG_PCAP: + tgen.gears["r1import"].run("rm /tmp/bmp.pcap") + tgen.gears["r1import"].run( + "tcpdump -nni r1import-eth0 -s 0 -w /tmp/bmp.pcap &", stdout=None + ) + + for rname, router in tgen.routers().items(): + logger.info("Loading router %s" % rname) + router.load_frr_config( + os.path.join(CWD, "{}/frr.conf".format(rname)), + [(TopoRouter.RD_ZEBRA, None), (TopoRouter.RD_BGP, "-M bmp")], + ) + + tgen.start_router() + + logger.info("starting BMP servers") + for bmp_name, server in tgen.get_bmp_servers().items(): + server.start(log_file=os.path.join(tgen.logdir, bmp_name, "bmp.log")) + + +def teardown_module(_mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_convergence(): + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + result = verify_bgp_convergence_from_running_config(tgen, dut="r1import") + assert result is True, "BGP is not converging" + + +def _test_prefixes_syncro(policy, vrf=None, step=1): + """ + Check that the given policy has syncronised the previously received BGP + updates. 
+ """ + tgen = get_topogen() + + prefixes = ["172.31.0.77/32", "2001::1125/128"] + # check + test_func = partial( + bmp_check_for_prefixes, + prefixes, + "update", + policy, + step, + tgen.gears["bmp1import"], + os.path.join(tgen.logdir, "bmp1import"), + tgen.gears["r1import"], + f"{CWD}/bmp1import", + UPDATE_EXPECTED_JSON, + LOC_RIB, + ) + success, res = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert success, "Checking the updated prefixes has failed ! %s" % res + + +def _test_prefixes(policy, vrf=None, step=0): + """ + Setup the BMP monitor policy, Add and withdraw ipv4/v6 prefixes. + Check if the previous actions are logged in the BMP server with the right + message type and the right policy. + """ + tgen = get_topogen() + + safi = "vpn" if vrf else "unicast" + + prefixes = ["172.31.0.77/32", "2001::1125/128"] + + for type in ("update", "withdraw"): + bmp_update_seq( + tgen.gears["bmp1import"], os.path.join(tgen.logdir, "bmp1import", "bmp.log") + ) + + bgp_configure_prefixes( + tgen.gears["r3"], + 65501, + "unicast", + prefixes, + vrf=None, + update=(type == "update"), + ) + + logger.info(f"checking for prefixes {type}") + + for ipver in [4, 6]: + if UPDATE_EXPECTED_JSON: + continue + ref_file = "{}/r1import/show-bgp-{}-ipv{}-{}-step{}.json".format( + CWD, vrf, ipver, type, step + ) + expected = json.loads(open(ref_file).read()) + + test_func = partial( + topotest.router_json_cmp, + tgen.gears["r1import"], + f"show bgp vrf {vrf} ipv{ipver} json", + expected, + ) + _, res = topotest.run_and_expect(test_func, None, count=30, wait=1) + assertmsg = f"r1: BGP IPv{ipver} convergence failed" + assert res is None, assertmsg + + # check + test_func = partial( + bmp_check_for_prefixes, + prefixes, + type, + policy, + step, + tgen.gears["bmp1import"], + os.path.join(tgen.logdir, "bmp1import"), + tgen.gears["r1import"], + f"{CWD}/bmp1import", + UPDATE_EXPECTED_JSON, + LOC_RIB, + ) + success, res = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert success, "Checking the updated prefixes has failed ! %s" % res + + +def _test_peer_up(check_locrib=True): + """ + Checking for BMP peers up messages + """ + + tgen = get_topogen() + if check_locrib: + peers = ["0.0.0.0", "192.168.1.3", "192:167::3"] + else: + peers = ["192.168.1.3", "192:167::3"] + + logger.info("checking for BMP peers up messages") + + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer up", + tgen.gears["bmp1import"], + os.path.join(tgen.logdir, "bmp1import", "bmp.log"), + is_rd_instance=True, + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert success, "Checking the updated prefixes has been failed !." + + +def test_bmp_server_logging(): + """ + Assert the logging of the bmp server. + """ + + def check_for_log_file(): + tgen = get_topogen() + output = tgen.gears["bmp1import"].run( + "ls {}".format(os.path.join(tgen.logdir, "bmp1import")) + ) + if "bmp.log" not in output: + return False + return True + + success, _ = topotest.run_and_expect(check_for_log_file, True, count=30, wait=1) + assert success, "The BMP server is not logging" + + +def test_bmp_peer_up_start(): + _test_peer_up() + + +def test_bmp_bgp_unicast(): + """ + Add/withdraw bgp unicast prefixes and check the bmp logs. 
+ """ + logger.info("*** Unicast prefixes pre-policy logging ***") + _test_prefixes(PRE_POLICY, vrf="vrf1", step=1) + logger.info("*** Unicast prefixes post-policy logging ***") + _test_prefixes(POST_POLICY, vrf="vrf1", step=1) + logger.info("*** Unicast prefixes loc-rib logging ***") + _test_prefixes(LOC_RIB, vrf="vrf1", step=1) + + +def test_peer_down(): + """ + Checking for BMP peers down messages + """ + tgen = get_topogen() + + tgen.gears["r3"].vtysh_cmd("clear bgp *") + + peers = ["192.168.1.3", "192:167::3"] + + logger.info("checking for BMP peers down messages") + + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer down", + tgen.gears["bmp1import"], + os.path.join(tgen.logdir, "bmp1import", "bmp.log"), + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert success, "Checking the updated prefixes has been failed !." + + +def test_reconfigure_prefixes(): + """ + Reconfigured BGP networks from R3. Check for BGP VRF update messages + """ + + tgen = get_topogen() + + prefixes = ["172.31.0.77/32", "2001::1125/128"] + bgp_configure_prefixes( + tgen.gears["r3"], + 65501, + "unicast", + prefixes, + vrf=None, + update=True, + ) + + for ipver in [4, 6]: + ref_file = "{}/r1import/show-bgp-{}-ipv{}-{}-step{}.json".format( + CWD, "vrf1", ipver, "update", 1 + ) + expected = json.loads(open(ref_file).read()) + + test_func = partial( + topotest.router_json_cmp, + tgen.gears["r1import"], + f"show bgp vrf vrf1 ipv{ipver} json", + expected, + ) + _, res = topotest.run_and_expect(test_func, None, count=30, wait=1) + assertmsg = f"r1: BGP IPv{ipver} convergence failed" + assert res is None, assertmsg + + +def test_monitor_syncro(): + """ + Checking for BMP peers down messages + """ + tgen = get_topogen() + + tgen.gears["r1import"].vtysh_cmd( + """ + configure terminal + router bgp 65501 + bmp targets bmp1 + bmp import-vrf-view vrf1 + """ + ) + + logger.info("*** Unicast prefixes pre-policy logging ***") + _test_prefixes_syncro(PRE_POLICY, vrf="vrf1") + logger.info("*** Unicast prefixes post-policy logging ***") + _test_prefixes_syncro(POST_POLICY, vrf="vrf1") + logger.info("*** Unicast prefixes loc-rib logging ***") + _test_prefixes_syncro(LOC_RIB, vrf="vrf1") + + +def test_reconfigure_route_distinguisher_vrf1(): + """ + Checking for BMP peers down messages + """ + tgen = get_topogen() + + bmp_update_seq( + tgen.gears["bmp1import"], os.path.join(tgen.logdir, "bmp1import", "bmp.log") + ) + peers = ["0.0.0.0"] + + tgen.gears["r1import"].vtysh_cmd( + """ + configure terminal + router bgp 65501 vrf vrf1 + address-family ipv4 unicast + rd vpn export 666:22 + exit-address-family + address-family ipv6 unicast + rd vpn export 666:22 + """ + ) + logger.info( + "Checking for BMP peer down LOC-RIB message with route-distinguisher set to 444:1" + ) + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer down", + tgen.gears["bmp1import"], + os.path.join(tgen.logdir, "bmp1import", "bmp.log"), + is_rd_instance=True, + peer_distinguisher="444:1", + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert ( + success + ), "Checking the BMP peer down LOC-RIB message with route-distinguisher set to 444:1 failed !." 
+ + logger.info( + "Checking for BMP peer up LOC-RIB messages with route-distinguisher set to 666:22" + ) + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer up", + tgen.gears["bmp1import"], + os.path.join(tgen.logdir, "bmp1import", "bmp.log"), + is_rd_instance=True, + peer_distinguisher="666:22", + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert ( + success + ), "Checking the BMP peer up LOC-RIB message with route-distinguisher set to 666:22 failed !." + + logger.info( + "Checking for BMP peer up messages with route-distinguisher set to 666:22" + ) + peers = ["192.168.1.3", "192:167::3"] + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer up", + tgen.gears["bmp1import"], + os.path.join(tgen.logdir, "bmp1import", "bmp.log"), + is_rd_instance=True, + peer_distinguisher="666:22", + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert ( + success + ), "Checking the BMP peer up messages with route-distinguisher set to 666:22 failed !." + + logger.info("*** Unicast prefixes pre-policy logging ***") + _test_prefixes_syncro(PRE_POLICY, vrf="vrf1", step=2) + logger.info("*** Unicast prefixes post-policy logging ***") + _test_prefixes_syncro(POST_POLICY, vrf="vrf1", step=2) + logger.info("*** Unicast prefixes loc-rib logging ***") + _test_prefixes_syncro(LOC_RIB, vrf="vrf1", step=2) + + +def test_bgp_routerid_changed(): + """ + Checking for BGP loc-rib up messages with new router-id + """ + tgen = get_topogen() + + tgen.gears["r1import"].vtysh_cmd( + """ + configure terminal + router bgp 65501 vrf vrf1 + bgp router-id 192.168.1.77 + """ + ) + + peers = ["0.0.0.0"] + + logger.info( + "checking for BMP peer down LOC-RIB message with router-id set to 192.168.0.1." + ) + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer down", + tgen.gears["bmp1import"], + os.path.join(tgen.logdir, "bmp1import", "bmp.log"), + is_rd_instance=True, + peer_bgp_id="192.168.0.1", + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert ( + success + ), "Checking the BMP peer down LOC-RIB message with router-id set to 192.168.0.1 failed !." + + logger.info( + "checking for BMP peer up LOC-RIB message with router-id set to 192.168.1.77." + ) + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer up", + tgen.gears["bmp1import"], + os.path.join(tgen.logdir, "bmp1import", "bmp.log"), + is_rd_instance=True, + peer_bgp_id="192.168.1.77", + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert ( + success + ), "Checking the BMP peer up LOC-RIB message with router-id set to 192.168.1.77 failed !." + + +def test_bgp_instance_flapping(): + """ + Checking for BGP loc-rib up messages + """ + tgen = get_topogen() + + # create flapping at BMP + # note: only peer up are handled at BMP level today + tgen.net["r1import"].cmd("ip link set dev vrf1 down") + + peers = ["0.0.0.0"] + + logger.info("checking for BMP peer down LOC-RIB message.") + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer down", + tgen.gears["bmp1import"], + os.path.join(tgen.logdir, "bmp1import", "bmp.log"), + is_rd_instance=True, + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert success, "Checking the BMP peer down LOC-RIB message failed !." 
+ + tgen.net["r1import"].cmd("ip link set dev vrf1 up") + + logger.info("checking for BMP peer up LOC-RIB message.") + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer up", + tgen.gears["bmp1import"], + os.path.join(tgen.logdir, "bmp1import", "bmp.log"), + is_rd_instance=True, + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert success, "Checking the BMP peer up LOC-RIB message failed !." + + +def test_peer_up_after_flush(): + """ + Checking for BMP peers down messages + """ + _test_peer_up(check_locrib=False) + + +def test_peer_down_locrib(): + """ + Checking for BMP peers down loc-rib messages + """ + tgen = get_topogen() + + tgen.gears["r1import"].vtysh_cmd( + """ + configure terminal + router bgp 65501 + bmp targets bmp1 + no bmp import-vrf-view vrf1 + """ + ) + + peers = ["0.0.0.0"] + + logger.info("checking for BMP peers down messages") + + test_func = partial( + bmp_check_for_peer_message, + peers, + "peer down", + tgen.gears["bmp1import"], + os.path.join(tgen.logdir, "bmp1import", "bmp.log"), + ) + success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1) + assert success, "Checking the BMP peer down message has failed !." + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_comm_list_match/r1/bgpd.conf b/tests/topotests/bgp_comm_list_match/r1/bgpd.conf index bac8412088..d7d58d22ae 100644 --- a/tests/topotests/bgp_comm_list_match/r1/bgpd.conf +++ b/tests/topotests/bgp_comm_list_match/r1/bgpd.conf @@ -12,6 +12,8 @@ router bgp 65001 ip prefix-list p1 seq 5 permit 172.16.255.1/32 ip prefix-list p3 seq 5 permit 172.16.255.3/32 ip prefix-list p4 seq 5 permit 172.16.255.4/32 +ip prefix-list p5 seq 5 permit 172.16.255.5/32 +ip prefix-list p6 seq 5 permit 172.16.255.6/32 ! route-map r2 permit 10 match ip address prefix-list p1 @@ -24,5 +26,13 @@ route-map r2 permit 30 set community 65001:10 65001:12 65001:13 exit route-map r2 permit 40 + match ip address prefix-list p5 + set community 65001:13 65001:14 +exit +route-map r2 permit 50 + match ip address prefix-list p6 + set community 65001:16 65001:17 65001:18 65001:19 +exit +route-map r2 permit 60 exit ! diff --git a/tests/topotests/bgp_comm_list_match/r1/zebra.conf b/tests/topotests/bgp_comm_list_match/r1/zebra.conf index 4219a7ca3a..1b19a4a12b 100644 --- a/tests/topotests/bgp_comm_list_match/r1/zebra.conf +++ b/tests/topotests/bgp_comm_list_match/r1/zebra.conf @@ -4,6 +4,8 @@ interface lo ip address 172.16.255.2/32 ip address 172.16.255.3/32 ip address 172.16.255.4/32 + ip address 172.16.255.5/32 + ip address 172.16.255.6/32 ! 
interface r1-eth0 ip address 192.168.0.1/24 diff --git a/tests/topotests/bgp_comm_list_match/test_bgp_comm_list_match.py b/tests/topotests/bgp_comm_list_match/test_bgp_comm_list_match.py index d0cab26e1a..c14ef6b8c3 100644 --- a/tests/topotests/bgp_comm_list_match/test_bgp_comm_list_match.py +++ b/tests/topotests/bgp_comm_list_match/test_bgp_comm_list_match.py @@ -133,6 +133,70 @@ def test_bgp_comm_list_match_any(): assert result is None, "Failed to filter BGP UPDATES with community-list on R3" +def test_bgp_comm_list_limit_match(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + router = tgen.gears["r3"] + router.vtysh_cmd( + """ + configure terminal + route-map r1 permit 20 + match community-limit 3 + """ + ) + + def _bgp_count(): + output = json.loads(router.vtysh_cmd("show bgp ipv4 json")) + expected = { + "vrfName": "default", + "routerId": "192.168.1.3", + "localAS": 65003, + "totalRoutes": 3, + "totalPaths": 3, + } + return topotest.json_cmp(output, expected) + + step("Check that 3 routes have been received on R3") + test_func = functools.partial(_bgp_count) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to check that 3 routes have been received on R3" + + +def test_bgp_comm_list_reset_limit_match(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + router = tgen.gears["r3"] + router.vtysh_cmd( + """ + configure terminal + route-map r1 permit 20 + no match community-limit + """ + ) + + def _bgp_count_two(): + output = json.loads(router.vtysh_cmd("show bgp ipv4 json")) + expected = { + "vrfName": "default", + "routerId": "192.168.1.3", + "localAS": 65003, + "totalRoutes": 4, + "totalPaths": 4, + } + return topotest.json_cmp(output, expected) + + step("Check that 4 routes have been received on R3") + test_func = functools.partial(_bgp_count_two) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to check that 4 routes have been received on R3" + + if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_dynamic_capability/r1/frr.conf b/tests/topotests/bgp_dynamic_capability/r1/frr.conf index c9594626f5..d91913e15e 100644 --- a/tests/topotests/bgp_dynamic_capability/r1/frr.conf +++ b/tests/topotests/bgp_dynamic_capability/r1/frr.conf @@ -3,6 +3,7 @@ ! int r1-eth0 ip address 192.168.1.1/24 + ipv6 address 2001:db8::1/64 ! router bgp 65001 no bgp ebgp-requires-policy @@ -12,11 +13,19 @@ router bgp 65001 neighbor 192.168.1.2 timers 1 3 neighbor 192.168.1.2 timers connect 1 neighbor 192.168.1.2 capability dynamic + neighbor 2001:db8::2 remote-as external + neighbor 2001:db8::2 timers 1 3 + neighbor 2001:db8::2 timers connect 1 + neighbor 2001:db8::2 capability dynamic ! address-family ipv4 unicast neighbor 192.168.1.2 addpath-tx-all-paths neighbor 192.168.1.2 addpath-rx-paths-limit 10 exit-address-family + ! + address-family ipv6 unicast + neighbor 2001:db8::2 activate + exit-address-family ! ip prefix-list r2 seq 5 permit 10.10.10.10/32 ! diff --git a/tests/topotests/bgp_dynamic_capability/r2/frr.conf b/tests/topotests/bgp_dynamic_capability/r2/frr.conf index 3cc1f1fc39..cca07078ea 100644 --- a/tests/topotests/bgp_dynamic_capability/r2/frr.conf +++ b/tests/topotests/bgp_dynamic_capability/r2/frr.conf @@ -7,6 +7,7 @@ int lo ! int r2-eth0 ip address 192.168.1.2/24 + ipv6 address 2001:db8::2/64 ! 
router bgp 65002 bgp graceful-restart @@ -16,9 +17,22 @@ router bgp 65002 neighbor 192.168.1.1 timers 1 3 neighbor 192.168.1.1 timers connect 1 neighbor 192.168.1.1 capability dynamic - neighbor 192.168.1.1 addpath-rx-paths-limit 20 + neighbor 192.168.1.1 capability extended-nexthop + neighbor 2001:db8::1 remote-as external + neighbor 2001:db8::1 timers 1 3 + neighbor 2001:db8::1 timers connect 1 + neighbor 2001:db8::1 capability dynamic + neighbor 2001:db8::1 capability extended-nexthop ! address-family ipv4 unicast redistribute connected + neighbor 192.168.1.1 addpath-tx-all-paths + neighbor 192.168.1.1 disable-addpath-rx + neighbor 192.168.1.1 addpath-rx-paths-limit 20 + exit-address-family + ! + address-family ipv6 unicast + redistribute connected + neighbor 2001:db8::1 activate exit-address-family ! diff --git a/tests/topotests/bgp_dynamic_capability/test_bgp_dynamic_capability_enhe.py b/tests/topotests/bgp_dynamic_capability/test_bgp_dynamic_capability_enhe.py new file mode 100644 index 0000000000..fd467b8c3b --- /dev/null +++ b/tests/topotests/bgp_dynamic_capability/test_bgp_dynamic_capability_enhe.py @@ -0,0 +1,189 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# Copyright (c) 2024 by +# Donatas Abraitis <donatas@opensourcerouting.org> +# + +""" +Test if extended nexthop capability is exchanged dynamically. +""" + +import os +import sys +import json +import pytest +import functools +import time + +pytestmark = [pytest.mark.bgpd] + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, get_topogen +from lib.common_config import step + + +def setup_module(mod): + topodef = {"s1": ("r1", "r2")} + tgen = Topogen(topodef, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for _, (rname, router) in enumerate(router_list.items(), 1): + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_dynamic_capability_enhe(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + r2 = tgen.gears["r2"] + + def _bgp_converge(): + output = json.loads(r1.vtysh_cmd("show bgp neighbor 2001:db8::2 json")) + expected = { + "2001:db8::2": { + "bgpState": "Established", + "localRole": "undefined", + "remoteRole": "undefined", + "neighborCapabilities": { + "dynamic": "advertisedAndReceived", + "extendedNexthop": "received", + }, + } + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial( + _bgp_converge, + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Can't converge" + + def _bgp_check_nexthop(): + output = json.loads(r1.vtysh_cmd("show ip route 10.10.10.10/32 json")) + expected = { + "10.10.10.10/32": [ + { + "protocol": "bgp", + "selected": True, + "installed": True, + "nexthops": [ + { + "fib": True, + "ip": "192.168.1.2", + "afi": "ipv4", + "interfaceName": "r1-eth0", + "active": True, + }, + { + "duplicate": True, + "ip": "192.168.1.2", + "afi": "ipv4", + "interfaceName": "r1-eth0", + "active": True, + }, + ], + } + ] + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial( + _bgp_check_nexthop, + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Can't see 10.10.10.10/32 with IPv4 only nexthops" 
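+    # Note: both paths to 10.10.10.10/32 (one per BGP session) still resolve to
+    # the IPv4 nexthop 192.168.1.2 at this point, since the IPv6 session has not
+    # yet negotiated the extended-nexthop capability; zebra marks the second one
+    # as a duplicate.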
+
+    step("Enable ENHE capability")
+
+    # Clear message stats to check if we receive a notification or not after we
+    # toggle the extended-nexthop capability.
+    r2.vtysh_cmd("clear bgp 2001:db8::1 message-stats")
+    r1.vtysh_cmd(
+        """
+        configure terminal
+        router bgp
+         neighbor 2001:db8::2 capability extended-nexthop
+        """
+    )
+
+    def _bgp_check_if_session_not_reset():
+        output = json.loads(r2.vtysh_cmd("show bgp neighbor 2001:db8::1 json"))
+        expected = {
+            "2001:db8::1": {
+                "bgpState": "Established",
+                "neighborCapabilities": {
+                    "dynamic": "advertisedAndReceived",
+                    "extendedNexthop": "advertisedAndReceived",
+                    "extendedNexthopFamililesByPeer": {
+                        "ipv4Unicast": "recieved",
+                    },
+                },
+                "messageStats": {
+                    "notificationsRecv": 0,
+                    "capabilityRecv": 1,
+                },
+            }
+        }
+        return topotest.json_cmp(output, expected)
+
+    test_func = functools.partial(
+        _bgp_check_if_session_not_reset,
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+    assert result is None, "Session was reset after setting ENHE capability"
+
+    def _bgp_check_nexthop_enhe():
+        output = json.loads(r1.vtysh_cmd("show ip route 10.10.10.10/32 json"))
+        expected = {
+            "10.10.10.10/32": [
+                {
+                    "protocol": "bgp",
+                    "selected": True,
+                    "installed": True,
+                    "nexthops": [
+                        {
+                            "fib": True,
+                            "ip": "192.168.1.2",
+                            "afi": "ipv4",
+                            "interfaceName": "r1-eth0",
+                            "active": True,
+                        },
+                        {
+                            "fib": True,
+                            "afi": "ipv6",
+                            "interfaceName": "r1-eth0",
+                            "active": True,
+                        },
+                    ],
+                }
+            ]
+        }
+        return topotest.json_cmp(output, expected)
+
+    test_func = functools.partial(
+        _bgp_check_nexthop_enhe,
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+    assert result is None, "Can't see 10.10.10.10/32 with the IPv6 nexthop after enabling ENHE"
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_dynamic_capability/test_bgp_dynamic_capability_addpath.py b/tests/topotests/bgp_dynamic_capability/test_bgp_dynamic_capability_path_limit.py
index 91df89b1b5..22e4fe687b 100644
--- a/tests/topotests/bgp_dynamic_capability/test_bgp_dynamic_capability_addpath.py
+++ b/tests/topotests/bgp_dynamic_capability/test_bgp_dynamic_capability_path_limit.py
@@ -9,8 +9,6 @@ Test if Addpath/Paths-Limit capabilities are adjusted dynamically.
T1: Enable Addpath/Paths-Limit capabilities and check if they are exchanged dynamically T2: Disable paths limit and check if it's exchanged dynamically -T3: Disable Addpath capability RX and check if it's exchanged dynamically -T4: Disable Addpath capability and check if it's exchanged dynamically """ import os @@ -65,12 +63,12 @@ def test_bgp_addpath_paths_limit(): "dynamic": "advertisedAndReceived", "addPath": { "ipv4Unicast": { - "txAdvertisedAndReceived": False, + "txAdvertisedAndReceived": True, "txAdvertised": True, - "txReceived": False, - "rxAdvertisedAndReceived": True, + "txReceived": True, + "rxAdvertisedAndReceived": False, "rxAdvertised": True, - "rxReceived": True, + "rxReceived": False, } }, "pathsLimit": { @@ -105,7 +103,6 @@ def test_bgp_addpath_paths_limit(): configure terminal router bgp address-family ipv4 unicast - neighbor 192.168.1.1 addpath-tx-all-paths neighbor 192.168.1.1 addpath-rx-paths-limit 21 """ ) @@ -122,9 +119,9 @@ def test_bgp_addpath_paths_limit(): "txAdvertisedAndReceived": True, "txAdvertised": True, "txReceived": True, - "rxAdvertisedAndReceived": True, + "rxAdvertisedAndReceived": False, "rxAdvertised": True, - "rxReceived": True, + "rxReceived": False, } }, "pathsLimit": { @@ -143,7 +140,7 @@ def test_bgp_addpath_paths_limit(): "messageStats": { "notificationsRecv": 0, "notificationsSent": 0, - "capabilityRecv": 2, + "capabilityRecv": 1, }, } } @@ -181,58 +178,6 @@ def test_bgp_addpath_paths_limit(): "txAdvertisedAndReceived": True, "txAdvertised": True, "txReceived": True, - "rxAdvertisedAndReceived": True, - "rxAdvertised": True, - "rxReceived": True, - } - }, - "pathsLimit": { - "ipv4Unicast": { - "advertisedAndReceived": True, - "advertisedPathsLimit": 10, - "receivedPathsLimit": 0, - } - }, - }, - "messageStats": { - "notificationsRecv": 0, - "notificationsSent": 0, - "capabilityRecv": 3, - }, - } - } - return topotest.json_cmp(output, expected) - - test_func = functools.partial( - _disable_paths_limit, - ) - _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) - assert result is None, "Something went wrong after disabling paths limit" - - ### - # T3: Disable Addpath capability RX and check if it's exchanged dynamically - ### - r2.vtysh_cmd( - """ - configure terminal - router bgp - address-family ipv4 unicast - neighbor 192.168.1.1 disable-addpath-rx - """ - ) - - def _disable_addpath_rx(): - output = json.loads(r1.vtysh_cmd("show bgp neighbor json")) - expected = { - "192.168.1.2": { - "bgpState": "Established", - "neighborCapabilities": { - "dynamic": "advertisedAndReceived", - "addPath": { - "ipv4Unicast": { - "txAdvertisedAndReceived": True, - "txAdvertised": True, - "txReceived": True, "rxAdvertisedAndReceived": False, "rxAdvertised": True, "rxReceived": False, @@ -249,63 +194,17 @@ def test_bgp_addpath_paths_limit(): "messageStats": { "notificationsRecv": 0, "notificationsSent": 0, - "capabilityRecv": 4, - }, - } - } - return topotest.json_cmp(output, expected) - - test_func = functools.partial( - _disable_addpath_rx, - ) - _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) - assert result is None, "Something went wrong after disabling Addpath RX flags" - - ### - # T4: Disable Addpath capability and check if it's exchanged dynamically - ### - r1.vtysh_cmd( - """ - configure terminal - router bgp - address-family ipv4 unicast - no neighbor 192.168.1.2 addpath-tx-all-paths - """ - ) - - def _disable_addpath(): - output = json.loads(r1.vtysh_cmd("show bgp neighbor json")) - expected = { - "192.168.1.2": 
{ - "bgpState": "Established", - "neighborCapabilities": { - "dynamic": "advertisedAndReceived", - "addPath": { - "ipv4Unicast": { - "txAdvertisedAndReceived": False, - "txAdvertised": False, - "txReceived": True, - "rxAdvertisedAndReceived": False, - "rxAdvertised": True, - "rxReceived": False, - } - }, - }, - "messageStats": { - "notificationsRecv": 0, - "notificationsSent": 0, - "capabilitySent": 1, - "capabilityRecv": 4, + "capabilityRecv": 2, }, } } return topotest.json_cmp(output, expected) test_func = functools.partial( - _disable_addpath, + _disable_paths_limit, ) _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) - assert result is None, "Something went wrong when disabling Addpath capability" + assert result is None, "Something went wrong after disabling paths limit" if __name__ == "__main__": diff --git a/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes.json b/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes.json index 7532ce9331..cfab5726ed 100644 --- a/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes.json +++ b/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes.json @@ -2,8 +2,8 @@ "bgpLocalRouterId":"192.168.100.21", "defaultLocPrf":100, "localAS":65000, - "192.168.101.41:2":{ - "rd":"192.168.101.41:2", + "65000:201":{ + "rd":"65000:201", "[5]:[0]:[32]:[192.168.101.41]":{ "prefix":"[5]:[0]:[32]:[192.168.101.41]", "prefixLen":352, @@ -65,8 +65,8 @@ ] } }, - "192.168.102.21:2":{ - "rd":"192.168.102.21:2", + "65000:101":{ + "rd":"65000:101", "[5]:[0]:[32]:[192.168.102.21]":{ "prefix":"[5]:[0]:[32]:[192.168.102.21]", "prefixLen":352, diff --git a/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes_all.json b/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes_all.json index a14ba1291e..444c67e442 100644 --- a/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes_all.json +++ b/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes_all.json @@ -2,8 +2,8 @@ "bgpLocalRouterId":"192.168.100.21", "defaultLocPrf":100, "localAS":65000, - "192.168.101.41:2":{ - "rd":"192.168.101.41:2", + "65000:201":{ + "rd":"65000:201", "[5]:[0]:[32]:[192.168.101.41]":{ "prefix":"[5]:[0]:[32]:[192.168.101.41]", "prefixLen":352, @@ -125,8 +125,8 @@ ] } }, - "192.168.102.21:2":{ - "rd":"192.168.102.21:2", + "65000:101":{ + "rd":"65000:101", "[5]:[0]:[32]:[192.168.102.21]":{ "prefix":"[5]:[0]:[32]:[192.168.102.21]", "prefixLen":352, diff --git a/tests/topotests/bgp_evpn_rt5/r1/bgpd.conf b/tests/topotests/bgp_evpn_rt5/r1/frr.conf index c8c4faf222..e4a8059988 100644 --- a/tests/topotests/bgp_evpn_rt5/r1/bgpd.conf +++ b/tests/topotests/bgp_evpn_rt5/r1/frr.conf @@ -1,15 +1,36 @@ +! debug zebra vxlan +! debug zebra kernel +! debug zebra dplane +! debug zebra rib ! debug bgp neighbor-events ! debug bgp updates ! debug bgp zebra +vrf r1-vrf-101 + vni 101 + exit-vrf +! +interface r1-eth0 + ip address 192.168.100.21/24 +! +interface loop101 vrf r1-vrf-101 + ip address 192.168.102.21/32 + ipv6 address fd00::1/128 +! router bgp 65000 bgp router-id 192.168.100.21 bgp log-neighbor-changes no bgp default ipv4-unicast + no bgp ebgp-requires-policy neighbor 192.168.100.41 remote-as 65000 neighbor 192.168.100.41 capability extended-nexthop + neighbor 192.168.100.61 remote-as 65500 + neighbor 192.168.100.61 capability extended-nexthop ! address-family l2vpn evpn neighbor 192.168.100.41 activate + neighbor 192.168.100.41 route-map rmap_r1 in + neighbor 192.168.100.61 activate + neighbor 192.168.100.61 route-map rmap_r3 in advertise-all-vni exit-address-family ! 
@@ -24,7 +45,17 @@ router bgp 65000 vrf r1-vrf-101 network fd00::1/128 exit-address-family address-family l2vpn evpn + rd 65000:101 + route-target both 65:101 advertise ipv4 unicast advertise ipv6 unicast exit-address-family ! +route-map rmap_r3 deny 1 + match evpn vni 102 +exit +route-map rmap_r1 permit 1 + match evpn vni 101 +exit + + diff --git a/tests/topotests/bgp_evpn_rt5/r1/zebra.conf b/tests/topotests/bgp_evpn_rt5/r1/zebra.conf deleted file mode 100644 index c3d508c2b6..0000000000 --- a/tests/topotests/bgp_evpn_rt5/r1/zebra.conf +++ /dev/null @@ -1,23 +0,0 @@ -log stdout - -hostname r1 -password zebra - -! debug zebra vxlan -! debug zebra kernel -! debug zebra dplane -! debug zebra rib -log stdout -vrf r1-vrf-101 - vni 101 - exit-vrf -! -interface r1-eth0 - ip address 192.168.100.21/24 -! -interface loop101 vrf r1-vrf-101 - ip address 192.168.102.21/32 - ipv6 address fd00::1/128 -! - - diff --git a/tests/topotests/bgp_evpn_rt5/r2/bgp_l2vpn_evpn_routes.json b/tests/topotests/bgp_evpn_rt5/r2/bgp_l2vpn_evpn_routes.json index 597bca5fd3..3a55a7a38d 100644 --- a/tests/topotests/bgp_evpn_rt5/r2/bgp_l2vpn_evpn_routes.json +++ b/tests/topotests/bgp_evpn_rt5/r2/bgp_l2vpn_evpn_routes.json @@ -2,8 +2,8 @@ "bgpLocalRouterId":"192.168.100.41", "defaultLocPrf":100, "localAS":65000, - "192.168.101.41:2":{ - "rd":"192.168.101.41:2", + "65000:201":{ + "rd":"65000:201", "[5]:[0]:[32]:[192.168.101.41]":{ "prefix":"[5]:[0]:[32]:[192.168.101.41]", "prefixLen":352, @@ -63,8 +63,8 @@ ] } }, - "192.168.102.21:2":{ - "rd":"192.168.102.21:2", + "65000:101":{ + "rd":"65000:101", "[5]:[0]:[32]:[192.168.102.21]":{ "prefix":"[5]:[0]:[32]:[192.168.102.21]", "prefixLen":352, diff --git a/tests/topotests/bgp_evpn_rt5/r2/bgpd.conf b/tests/topotests/bgp_evpn_rt5/r2/frr.conf index 4f1d8e4a37..0bb4b7cab5 100644 --- a/tests/topotests/bgp_evpn_rt5/r2/bgpd.conf +++ b/tests/topotests/bgp_evpn_rt5/r2/frr.conf @@ -1,6 +1,19 @@ +! debug zebra vxlan ! debug bgp neighbor-events ! debug bgp updates ! debug bgp zebra + +vrf r2-vrf-101 + vni 101 + exit-vrf +! +interface loop101 vrf r2-vrf-101 + ip address 192.168.101.41/32 + ipv6 address fd00::2/128 +! +interface r2-eth0 + ip address 192.168.100.41/24 +! router bgp 65000 bgp router-id 192.168.100.41 bgp log-neighbor-changes @@ -27,6 +40,8 @@ router bgp 65000 vrf r2-vrf-101 network fd00::3/128 exit-address-family address-family l2vpn evpn + rd 65000:201 + route-target both 65:101 advertise ipv4 unicast route-map rmap4 advertise ipv6 unicast route-map rmap6 exit-address-family @@ -47,3 +62,4 @@ exit route-map rmap6 deny 2 match ipv6 address acl6_2 exit + diff --git a/tests/topotests/bgp_evpn_rt5/r2/zebra.conf b/tests/topotests/bgp_evpn_rt5/r2/zebra.conf deleted file mode 100644 index 7db40cb59c..0000000000 --- a/tests/topotests/bgp_evpn_rt5/r2/zebra.conf +++ /dev/null @@ -1,19 +0,0 @@ -log stdout - -hostname r2 -password zebra - -! debug zebra vxlan - -vrf r2-vrf-101 - vni 101 - exit-vrf -! -interface loop101 vrf r2-vrf-101 - ip address 192.168.101.41/32 - ipv6 address fd00::2/128 -! -interface r2-eth0 - ip address 192.168.100.41/24 -! - diff --git a/tests/topotests/bgp_evpn_rt5/r3/frr.conf b/tests/topotests/bgp_evpn_rt5/r3/frr.conf new file mode 100644 index 0000000000..3f3851bd8c --- /dev/null +++ b/tests/topotests/bgp_evpn_rt5/r3/frr.conf @@ -0,0 +1,46 @@ +! debug bgp neighbor-events +! debug bgp updates +! debug bgp zebra +vrf r3-vrf-102 + vni 102 + exit-vrf +! +interface r3-eth0 + ip address 192.168.100.61/24 +! 
+interface loop102 vrf r3-vrf-102 + ip address 192.168.102.61/32 + ipv6 address fd00:6::1/128 +! +router bgp 65500 + bgp router-id 192.168.100.61 + bgp log-neighbor-changes + no bgp default ipv4-unicast + no bgp ebgp-requires-policy + neighbor 192.168.100.21 remote-as 65000 + neighbor 192.168.100.21 capability extended-nexthop + ! + address-family l2vpn evpn + neighbor 192.168.100.21 activate + advertise-all-vni + exit-address-family +! +router bgp 65000 vrf r3-vrf-102 + bgp router-id 192.168.100.61 + bgp log-neighbor-changes + no bgp network import-check + address-family ipv4 unicast + network 192.168.102.102/32 + exit-address-family + address-family ipv6 unicast + network fd00:102::1/128 + exit-address-family + address-family l2vpn evpn + rd 65000:302 + route-target both 65:101 + advertise ipv4 unicast + advertise ipv6 unicast + exit-address-family + ! + + diff --git a/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py b/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py index a9636a92f4..2b8355af09 100644 --- a/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py +++ b/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py @@ -42,10 +42,12 @@ def build_topo(tgen): tgen.add_router("r1") tgen.add_router("r2") + tgen.add_router("r3") switch = tgen.add_switch("s1") switch.add_link(tgen.gears["r1"]) switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) switch = tgen.add_switch("s2") switch.add_link(tgen.gears["r1"]) @@ -53,6 +55,9 @@ def build_topo(tgen): switch = tgen.add_switch("s3") switch.add_link(tgen.gears["r2"]) + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["r3"]) + def setup_module(mod): "Sets up the pytest environment" @@ -71,16 +76,16 @@ def setup_module(mod): ) return pytest.skip("Skipping BGP EVPN RT5 NETNS Test. Kernel not supported") - # create VRF vrf-101 on R1 and R2 + # create VRF vrf-101 on R1, R2, R3 # create loop101 cmds_vrflite = [ - "ip link add {}-vrf-101 type vrf table 101", - "ip ru add oif {}-vrf-101 table 101", - "ip ru add iif {}-vrf-101 table 101", - "ip link set dev {}-vrf-101 up", - "ip link add loop101 type dummy", - "ip link set dev loop101 master {}-vrf-101", - "ip link set dev loop101 up", + "ip link add {0}-vrf-{1} type vrf table {1}", + "ip ru add oif {0}-vrf-{1} table {1}", + "ip ru add iif {0}-vrf-{1} table {1}", + "ip link set dev {0}-vrf-{1} up", + "ip link add loop{1} type dummy", + "ip link set dev loop{1} master {0}-vrf-{1}", + "ip link set dev loop{1} up", ] cmds_r2 = [ # config routing 101 @@ -92,6 +97,15 @@ def setup_module(mod): "ip link set vxlan-101 up type bridge_slave learning off flood off mcast_flood off", ] + cmds_r3 = [ # config routing 102 + "ip link add name bridge-102 up type bridge stp_state 0", + "ip link set bridge-102 master {}-vrf-102", + "ip link set dev bridge-102 up", + "ip link add name vxlan-102 type vxlan id 102 dstport 4789 dev r3-eth0 local 192.168.100.61", + "ip link set dev vxlan-102 master bridge-102", + "ip link set vxlan-102 up type bridge_slave learning off flood off mcast_flood off", + ] + # cmds_r1_netns_method3 = [ # "ip link add name vxlan-{1} type vxlan id {1} dstport 4789 dev {0}-eth0 local 192.168.100.21", # "ip link set dev vxlan-{1} netns {0}-vrf-{1}", @@ -111,8 +125,8 @@ def setup_module(mod): router = tgen.gears["r2"] for cmd in cmds_vrflite: - logger.info("cmd to r2: " + cmd.format("r2")) - output = router.cmd_raises(cmd.format("r2")) + logger.info("cmd to r2: " + cmd.format("r2", 101)) + output = router.cmd_raises(cmd.format("r2", 101)) logger.info("result: " + output) for cmd in cmds_r2: @@ 
-120,6 +134,17 @@ def setup_module(mod): output = router.cmd_raises(cmd.format("r2")) logger.info("result: " + output) + router = tgen.gears["r3"] + for cmd in cmds_vrflite: + logger.info("cmd to r3: " + cmd.format("r3", 102)) + output = router.cmd_raises(cmd.format("r3", 102)) + logger.info("result: " + output) + + for cmd in cmds_r3: + logger.info("cmd to r3: " + cmd.format("r3")) + output = router.cmd_raises(cmd.format("r3")) + logger.info("result: " + output) + tgen.net["r1"].cmd_raises( "ip link add name vxlan-101 type vxlan id 101 dstport 4789 dev r1-eth0 local 192.168.100.21" ) @@ -134,21 +159,13 @@ def setup_module(mod): tgen.net["r1"].cmd_raises("ip -n r1-vrf-101 link set bridge-101 up") tgen.net["r1"].cmd_raises("ip -n r1-vrf-101 link set vxlan-101 up") - for rname, router in router_list.items(): + for rname, router in tgen.routers().items(): + logger.info("Loading router %s" % rname) if rname == "r1": - router.load_config(TopoRouter.RD_MGMTD, None, "--vrfwnetns") - router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, "{}/zebra.conf".format(rname)), - "--vrfwnetns", - ) + router.use_netns_vrf() + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) else: - router.load_config( - TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) - ) - router.load_config( - TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) - ) + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) # Initialize all routers. tgen.start_router() diff --git a/tests/topotests/bgp_gr_notification/test_bgp_gr_notification.py b/tests/topotests/bgp_gr_notification/test_bgp_gr_notification.py index 5d8338d6eb..7e39b83d8f 100644 --- a/tests/topotests/bgp_gr_notification/test_bgp_gr_notification.py +++ b/tests/topotests/bgp_gr_notification/test_bgp_gr_notification.py @@ -187,6 +187,16 @@ def test_bgp_administrative_reset_gr(): """ ) + def _bgp_verify_show_bgp_router_json(): + output = json.loads(r1.vtysh_cmd("show bgp router json")) + expected = { + "bgpStartedAt": "*", + "bgpStartedGracefully": False, + "bgpInMaintenanceMode": False, + "bgpInstanceCount": 1, + } + return topotest.json_cmp(output, expected) + step("Initial BGP converge") test_func = functools.partial(_bgp_converge) _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) @@ -205,6 +215,11 @@ def test_bgp_administrative_reset_gr(): _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert result is None, "Failed to send Administrative Reset notification from R2" + step("Check show bgp router json") + test_func = functools.partial(_bgp_verify_show_bgp_router_json) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Invalid BGP router details" + if __name__ == "__main__": args = ["-s"] + sys.argv[1:] diff --git a/tests/topotests/bgp_ipv6_rtadv/r1/bgp_ipv4_routes.json b/tests/topotests/bgp_ipv6_rtadv/r1/bgp_ipv4_routes.json new file mode 100644 index 0000000000..affe5cf8df --- /dev/null +++ b/tests/topotests/bgp_ipv6_rtadv/r1/bgp_ipv4_routes.json @@ -0,0 +1,35 @@ +{ + "vrfId": 0, + "vrfName": "default", + "routerId": "10.254.254.1", + "defaultLocPrf": 100, + "localAS": 101, + "routes": { + "10.254.254.2/32": [{ + "valid": true, + "bestpath": true, + "selectionReason":"First path received", + "pathFrom":"external", + "prefix":"10.254.254.2", + "prefixLen":32, + "network":"10.254.254.2/32", + "metric":0, + "weight":0, + "path":"102", + "origin":"incomplete", + "nexthops":[{ + "ip":"2001:db8:1::2", + 
"hostname":"r2", + "afi":"ipv6", + "scope":"global" + },{ + "interface":"r1-eth0", + "hostname":"r2", + "afi":"ipv6", + "scope":"link-local", + "used":true + }]}] + }, + "totalRoutes": 2, + "totalPaths": 2 +} diff --git a/tests/topotests/bgp_ipv6_rtadv/r1/bgp_ipv6_routes.json b/tests/topotests/bgp_ipv6_rtadv/r1/bgp_ipv6_routes.json new file mode 100644 index 0000000000..bccfb45771 --- /dev/null +++ b/tests/topotests/bgp_ipv6_rtadv/r1/bgp_ipv6_routes.json @@ -0,0 +1,35 @@ +{ + "vrfId": 0, + "vrfName": "default", + "routerId": "10.254.254.1", + "defaultLocPrf": 100, + "localAS": 101, + "routes": { + "2001:db8:1::/64": [{ + "valid":true, + "bestpath":true, + "selectionReason":"First path received", + "pathFrom":"external", + "prefix":"2001:db8:1::", + "prefixLen":64, + "network":"2001:db8:1::/64", + "metric":0, + "weight":0, + "path":"102", + "origin":"incomplete", + "nexthops":[{ + "ip":"2001:db8:1::2", + "hostname":"r2", + "afi":"ipv6", + "scope":"global" + },{ + "interface":"r1-eth0", + "hostname":"r2", + "afi":"ipv6", + "scope":"link-local", + "used":true + }]}] + }, + "totalRoutes": 1, + "totalPaths": 1 +} diff --git a/tests/topotests/bgp_ipv6_rtadv/test_bgp_ipv6_rtadv.py b/tests/topotests/bgp_ipv6_rtadv/test_bgp_ipv6_rtadv.py index 045ac91fc7..5992c30116 100644 --- a/tests/topotests/bgp_ipv6_rtadv/test_bgp_ipv6_rtadv.py +++ b/tests/topotests/bgp_ipv6_rtadv/test_bgp_ipv6_rtadv.py @@ -75,6 +75,45 @@ def teardown_module(_mod): def test_protocols_convergence(): """ + Assert that BGP protocol has converged + by checking the incoming BGP updates have been received. + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Check BGP IPv4 routing table. + logger.info("Checking BGP IPv4 routes for convergence") + router = tgen.gears["r1"] + + json_file = "{}/{}/bgp_ipv4_routes.json".format(CWD, router.name) + expected = json.loads(open(json_file).read()) + test_func = partial( + topotest.router_json_cmp, + router, + "show bgp ipv4 json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) + assertmsg = '"{}" JSON output mismatches'.format(router.name) + assert result is None, assertmsg + + # Check BGP IPv6 routing table. + json_file = "{}/{}/bgp_ipv6_routes.json".format(CWD, router.name) + expected = json.loads(open(json_file).read()) + test_func = partial( + topotest.router_json_cmp, + router, + "show bgp ipv6 json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) + assertmsg = '"{}" JSON output mismatches'.format(router.name) + assert result is None, assertmsg + + +def test_route_convergence(): + """ Assert that all protocols have converged statuses as they depend on it. """ diff --git a/tests/topotests/bgp_peer_solo/r1/frr.conf b/tests/topotests/bgp_peer_solo/r1/frr.conf new file mode 100644 index 0000000000..6ef3688b83 --- /dev/null +++ b/tests/topotests/bgp_peer_solo/r1/frr.conf @@ -0,0 +1,10 @@ +! +int r1-eth0 + ip address 10.255.0.1/24 +! +router bgp 65001 + no bgp ebgp-requires-policy + neighbor 10.255.0.2 timers 1 3 + neighbor 10.255.0.2 timers connect 1 + neighbor 10.255.0.2 remote-as external +! diff --git a/tests/topotests/bgp_peer_solo/r2/frr.conf b/tests/topotests/bgp_peer_solo/r2/frr.conf new file mode 100644 index 0000000000..c58e327418 --- /dev/null +++ b/tests/topotests/bgp_peer_solo/r2/frr.conf @@ -0,0 +1,14 @@ +! +int r2-eth0 + ip address 10.255.0.2/24 +! 
+router bgp 65002 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 10.255.0.1 remote-as external + neighbor 10.255.0.1 timers 1 3 + neighbor 10.255.0.1 timers connect 1 + address-family ipv4 unicast + network 10.0.0.1/32 + exit-address-family +! diff --git a/tests/topotests/bgp_peer_solo/test_bgp_peer_solo.py b/tests/topotests/bgp_peer_solo/test_bgp_peer_solo.py new file mode 100644 index 0000000000..6bd2430aea --- /dev/null +++ b/tests/topotests/bgp_peer_solo/test_bgp_peer_solo.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +import os +import re +import sys +import json +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +from lib import topotest +from lib.topogen import Topogen, get_topogen + +pytestmark = [pytest.mark.bgpd] + + +def setup_module(mod): + topodef = {"s1": ("r1", "r2")} + tgen = Topogen(topodef, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for _, (rname, router) in enumerate(router_list.items(), 1): + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_peer_solo(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + + def _bgp_converge(): + output = json.loads(r1.vtysh_cmd("show bgp ipv4 unicast summary json")) + print("output=", output) + expected = { + "peers": { + "10.255.0.2": { + "remoteAs": 65002, + "state": "Established", + "peerState": "OK", + }, + }, + "totalPeers": 1, + } + + return topotest.json_cmp(output, expected) + + test_func = functools.partial( + _bgp_converge, + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Can't converge initial state" + + def _bgp_advertised(): + output = json.loads(r1.vtysh_cmd("show bgp ipv4 neighbors 10.255.0.2 advertised-routes json")) + print("output adv=", output) + expected = { + "advertisedRoutes": { + "10.0.0.1/32": {}, + }, + "totalPrefixCounter": 1, + } + + return topotest.json_cmp(output, expected) + + test_func = functools.partial( + _bgp_advertised, + ) + _, result = topotest.run_and_expect(test_func, None, count=20, wait=1) + assert result is None, "Should contain an advertised route" + + # + # Apply solo option + # + r1.vtysh_cmd( + """ + configure terminal + router bgp 65001 + neighbor 10.255.0.2 solo + """ + ) + + def _bgp_no_advertised(): + output = json.loads(r1.vtysh_cmd("show bgp ipv4 neighbors 10.255.0.2 advertised-routes json")) + expected = { + "totalPrefixCounter": 0, + } + + return topotest.json_cmp(output, expected) + + test_func = functools.partial( + _bgp_no_advertised, + ) + _, result = topotest.run_and_expect(test_func, None, count=20, wait=1) + assert result is None, "Shouldn't contain advertised routes" + + # + # Unset solo option + # + r1.vtysh_cmd( + """ + configure terminal + router bgp 65001 + no neighbor 10.255.0.2 solo + """ + ) + + test_func = functools.partial( + _bgp_advertised, + ) + _, result = topotest.run_and_expect(test_func, None, count=20, wait=1) + assert result is None, "Should contain an advertised route" + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_rpki_topo1/r2/bgp_rpki_valid.json b/tests/topotests/bgp_rpki_topo1/r2/bgp_rpki_valid.json index 016c019d10..966519bfd9 100644 --- 
a/tests/topotests/bgp_rpki_topo1/r2/bgp_rpki_valid.json +++ b/tests/topotests/bgp_rpki_topo1/r2/bgp_rpki_valid.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 3, "routerId": "192.0.2.2", "defaultLocPrf": 100, "localAS": 65002, @@ -11,7 +10,6 @@ "origin": "IGP", "metric": 0, "valid": true, - "version": 2, "rpkiValidationState": "valid", "bestpath": { "overall": true, @@ -40,7 +38,6 @@ "origin": "IGP", "metric": 0, "valid": true, - "version": 3, "rpkiValidationState": "valid", "bestpath": { "overall": true, diff --git a/tests/topotests/bgp_srv6_sid_reachability/test_bgp_srv6_sid_reachability.py b/tests/topotests/bgp_srv6_sid_reachability/test_bgp_srv6_sid_reachability.py index f8385401c5..5c0b909517 100755 --- a/tests/topotests/bgp_srv6_sid_reachability/test_bgp_srv6_sid_reachability.py +++ b/tests/topotests/bgp_srv6_sid_reachability/test_bgp_srv6_sid_reachability.py @@ -99,7 +99,7 @@ def teardown_module(mod): def test_ping(): tgen = get_topogen() - check_ping("c11", "192.168.2.1", True, 10, 1) + check_ping("c11", "192.168.2.1", True, 120, 1) check_ping("c11", "192.168.3.1", True, 10, 1) check_ping("c12", "192.168.2.1", True, 10, 1) check_ping("c12", "192.168.3.1", True, 10, 1) @@ -159,6 +159,36 @@ def test_sid_reachable_again_bgp_update(): check_ping("c11", "192.168.2.1", True, 10, 1) +def test_sid_unreachable_no_router(): + get_topogen().gears["r2"].vtysh_cmd( + """ + configure terminal + no router bgp 65002 vrf vrf10 + """ + ) + check_ping("c11", "192.168.2.1", False, 10, 1) + + +def test_sid_reachable_again_no_router(): + get_topogen().gears["r2"].vtysh_cmd( + """ + configure terminal + router bgp 65002 vrf vrf10 + bgp router-id 192.0.2.2 + ! + address-family ipv4 unicast + redistribute connected + sid vpn export 1 + rd vpn export 65002:10 + rt vpn both 0:10 + import vpn + export vpn + exit-address-family + """ + ) + check_ping("c11", "192.168.2.1", True, 10, 1) + + if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/test_bgp_srv6l3vpn_to_bgp_vrf3.py b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/test_bgp_srv6l3vpn_to_bgp_vrf3.py index bba0061858..530537646b 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/test_bgp_srv6l3vpn_to_bgp_vrf3.py +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/test_bgp_srv6l3vpn_to_bgp_vrf3.py @@ -95,7 +95,7 @@ def open_json_file(filename): assert False, "Could not read file {}".format(filename) -def check_rib(name, cmd, expected_file): +def check_rib(name, cmd, expected_file, count=30, wait=0.5): def _check(name, dest_addr, match): logger.info("polling") tgen = get_topogen() @@ -107,12 +107,12 @@ def check_rib(name, cmd, expected_file): logger.info('[+] check {} "{}" {}'.format(name, cmd, expected_file)) tgen = get_topogen() func = functools.partial(_check, name, cmd, expected_file) - _, result = topotest.run_and_expect(func, None, count=10, wait=0.5) + _, result = topotest.run_and_expect(func, None, count, wait) assert result is None, "Failed" def test_rib(): - check_rib("r1", "show bgp ipv4 vpn json", "r1/vpnv4_rib.json") + check_rib("r1", "show bgp ipv4 vpn json", "r1/vpnv4_rib.json", 120, 1) check_rib("r2", "show bgp ipv4 vpn json", "r2/vpnv4_rib.json") check_rib("r1", "show ip route vrf vrf10 json", "r1/vrf10v4_rib.json") check_rib("r1", "show ip route vrf vrf20 json", "r1/vrf20v4_rib.json") diff --git a/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py b/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py index 
3932c29b98..ee7e00b323 100644 --- a/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py +++ b/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py @@ -232,6 +232,20 @@ def test_local_vs_non_local(): assert False, "Route 60.0.0.0/24 should not have fibPending" +def test_ip_protocol_any_fib_filter(): + # "Filtered route of source protocol any should not get installed in fib" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r2 = tgen.gears["r2"] + r2.vtysh_cmd("conf\nno ip protocol bgp") + r2.vtysh_cmd("conf\nip protocol any route-map LIMIT") + test_bgp_route() + + if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_table_direct_topo1/__init__.py b/tests/topotests/bgp_table_direct_topo1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_table_direct_topo1/__init__.py diff --git a/tests/topotests/bgp_table_direct_topo1/r1/frr.conf b/tests/topotests/bgp_table_direct_topo1/r1/frr.conf new file mode 100644 index 0000000000..c45e3456a4 --- /dev/null +++ b/tests/topotests/bgp_table_direct_topo1/r1/frr.conf @@ -0,0 +1,31 @@ +log commands +! +debug bgp zebra +debug zebra events +! +ip route 10.254.254.1/32 lo table 2000 +ip route 10.254.254.2/32 lo table 2000 +ip route 10.254.254.3/32 lo table 2000 +! +interface r1-eth0 + ip address 192.168.10.1/24 +! +interface r1-eth1 vrf blue + ip address 192.168.20.1/24 +! +router bgp 65001 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 192.168.10.2 remote-as external + address-family ipv4 unicast + redistribute table-direct 2000 + exit-address-family +! +router bgp 65001 vrf blue + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 192.168.20.2 remote-as external + address-family ipv4 unicast + redistribute table-direct 2000 + exit-address-family +!
\ No newline at end of file diff --git a/tests/topotests/bgp_table_direct_topo1/r2/frr.conf b/tests/topotests/bgp_table_direct_topo1/r2/frr.conf new file mode 100644 index 0000000000..04787be0b3 --- /dev/null +++ b/tests/topotests/bgp_table_direct_topo1/r2/frr.conf @@ -0,0 +1,10 @@ +log commands +! +interface r2-eth0 + ip address 192.168.10.2/24 +! +router bgp 65002 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 192.168.10.1 remote-as external +!
\ No newline at end of file diff --git a/tests/topotests/bgp_table_direct_topo1/r3/frr.conf b/tests/topotests/bgp_table_direct_topo1/r3/frr.conf new file mode 100644 index 0000000000..2530b28bfd --- /dev/null +++ b/tests/topotests/bgp_table_direct_topo1/r3/frr.conf @@ -0,0 +1,10 @@ +log commands +! +interface r3-eth0 + ip address 192.168.20.2/24 +! +router bgp 65003 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 192.168.20.1 remote-as external +!
\ No newline at end of file
diff --git a/tests/topotests/bgp_table_direct_topo1/test_bgp_table_direct_topo1.py b/tests/topotests/bgp_table_direct_topo1/test_bgp_table_direct_topo1.py
new file mode 100644
index 0000000000..70257be3e7
--- /dev/null
+++ b/tests/topotests/bgp_table_direct_topo1/test_bgp_table_direct_topo1.py
@@ -0,0 +1,201 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_bgp_table_direct_topo1.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2025 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+
+"""
+test_bgp_table_direct_topo1.py: Test the BGP table-direct redistribution.
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import re
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+
+# Required to instantiate the topology builder class.
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+from lib.pim import McastTesterHelper
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
+
+app_helper = McastTesterHelper()
+
+
+def build_topo(tgen):
+    """
+    +----+     +----+
+    | r1 | <-> | r2 |
+    +----+     +----+
+       |
+       |       +----+
+       --------| r3 |
+               +----+
+    """
+
+    # Create 3 routers
+    for routern in range(1, 4):
+        tgen.add_router(f"r{routern}")
+
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s2")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r3"])
+
+
+def setup_module(mod):
+    "Sets up the pytest environment"
+    tgen = Topogen(build_topo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+    for _, router in router_list.items():
+        router.load_frr_config(os.path.join(CWD, f"{router.name}/frr.conf"))
+
+    tgen.gears["r1"].run("ip link add blue type vrf table 10")
+    tgen.gears["r1"].run("ip link set blue up")
+    tgen.gears["r1"].run("ip link set r1-eth1 master blue")
+
+    # Initialize all routers.
+    tgen.start_router()
+
+    app_helper.init(tgen)
+
+
+def teardown_module():
+    "Teardown the pytest environment"
+    tgen = get_topogen()
+    app_helper.cleanup()
+    tgen.stop_topology()
+
+
+def expect_bgp_route(router, iptype, route, missing=False):
+    "Wait until the route is present in the RIB (or absent, when missing=True)."
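+    # The helper polls "show <iptype> route json" via router_json_cmp() and
+    # topotest.run_and_expect(); with missing=True the expected value
+    # {route: None} is how json_cmp asserts that the prefix is gone.
+    # e.g. expect_bgp_route("r2", "ip", "10.254.254.1/32")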
+ if missing: + logger.info("waiting route {} go missing in {}".format(route, router)) + else: + logger.info("waiting route {} in {}".format(route, router)) + + tgen = get_topogen() + expected_output = {route: [{"protocol": "bgp"}]} + wait_time = 130 + if missing: + expected_output = {route: None} + wait_time = 5 + + test_func = partial( + topotest.router_json_cmp, + tgen.gears[router], + "show {} route json".format(iptype), + expected_output + ) + + _, result = topotest.run_and_expect(test_func, None, count=130, wait=1) + assertmsg = f'"{router}" convergence failure' + assert result is None, assertmsg + + +def test_bgp_convergence(): + "Wait for BGP protocol convergence" + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("waiting for protocols to converge") + + # Wait for R2 + expect_bgp_route("r2", "ip", "10.254.254.1/32") + expect_bgp_route("r2", "ip", "10.254.254.2/32") + expect_bgp_route("r2", "ip", "10.254.254.3/32") + + # Wait for R3 + expect_bgp_route("r3", "ip", "10.254.254.1/32") + expect_bgp_route("r3", "ip", "10.254.254.2/32") + expect_bgp_route("r3", "ip", "10.254.254.3/32") + + +def test_route_change_convergence(): + "Change routes in table 2000 to test zebra redistribution." + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.gears["r1"].vtysh_cmd(""" + configure terminal + no ip route 10.254.254.2/32 lo table 2000 + ip route 10.254.254.10/32 lo table 2000 + """) + + # Check R2 + expect_bgp_route("r2", "ip", "10.254.254.2/32", missing=True) + expect_bgp_route("r2", "ip", "10.254.254.10/32") + + # Check R3 + expect_bgp_route("r3", "ip", "10.254.254.2/32", missing=True) + expect_bgp_route("r3", "ip", "10.254.254.10/32") + + +def test_configuration_removal_convergence(): + "Remove table direct configuration and check if routes went missing." + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.gears["r1"].vtysh_cmd(""" + configure terminal + router bgp 65001 + address-family ipv4 unicast + no redistribute table-direct 2000 + exit-address-family + exit + + router bgp 65001 vrf blue + address-family ipv4 unicast + no redistribute table-direct 2000 + exit-address-family + exit + """) + + # Check R2 + expect_bgp_route("r2", "ip", "10.254.254.1/32", missing=True) + expect_bgp_route("r2", "ip", "10.254.254.3/32", missing=True) + expect_bgp_route("r2", "ip", "10.254.254.10/32", missing=True) + + # Check R3 + expect_bgp_route("r3", "ip", "10.254.254.1/32", missing=True) + expect_bgp_route("r3", "ip", "10.254.254.3/32", missing=True) + expect_bgp_route("r3", "ip", "10.254.254.10/32", missing=True) + + +def test_memory_leak(): + "Run the memory leak test and report results." + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_table_map/r1/frr.conf b/tests/topotests/bgp_table_map/r1/frr.conf new file mode 100644 index 0000000000..f74440c384 --- /dev/null +++ b/tests/topotests/bgp_table_map/r1/frr.conf @@ -0,0 +1,22 @@ +! +int r1-eth0 + ip address 10.255.0.1/24 +! +access-list AccList seq 5 permit 10.0.0.1/32 +! +route-map TableMap permit 10 + match ip address AccList +exit +! 
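+! with "table-map TableMap" below, only 10.0.0.1/32 (permitted by AccList) is
+! handed to zebra for FIB installation; 10.0.0.2/32 stays in the BGP table only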
+router bgp 65001 + bgp router-id 10.255.0.1 + no bgp ebgp-requires-policy + neighbor 10.255.0.2 remote-as external + neighbor 10.255.0.2 timers 1 3 + neighbor 10.255.0.2 timers connect 1 + ! + address-family ipv4 unicast + table-map TableMap + exit-address-family +exit +! diff --git a/tests/topotests/bgp_table_map/r2/frr.conf b/tests/topotests/bgp_table_map/r2/frr.conf new file mode 100644 index 0000000000..4523fe49ea --- /dev/null +++ b/tests/topotests/bgp_table_map/r2/frr.conf @@ -0,0 +1,18 @@ +! +int r2-eth0 + ip address 10.255.0.2/24 +! +router bgp 65002 + bgp router-id 10.255.0.2 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 10.255.0.1 remote-as external + neighbor 10.255.0.1 timers 1 3 + neighbor 10.255.0.1 timers connect 1 + ! + address-family ipv4 unicast + network 10.0.0.1/32 + network 10.0.0.2/32 + exit-address-family +exit +! diff --git a/tests/topotests/bgp_table_map/test_bgp_table_map.py b/tests/topotests/bgp_table_map/test_bgp_table_map.py new file mode 100644 index 0000000000..b10680f741 --- /dev/null +++ b/tests/topotests/bgp_table_map/test_bgp_table_map.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +import functools, json, os, pytest, re, sys + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +from lib import topotest +from lib.topogen import Topogen, get_topogen + +pytestmark = [pytest.mark.bgpd] + + +def setup_module(mod): + topodef = {"s1": ("r1", "r2")} + tgen = Topogen(topodef, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for _, (rname, router) in enumerate(router_list.items(), 1): + router.load_frr_config( + os.path.join(CWD, "{}/frr.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_table_map(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + + def _bgp_converge(): + output = json.loads( + r1.vtysh_cmd( "show bgp ipv4 unicast summary json") + ) + expected = { + "peers": { + "10.255.0.2": { + "remoteAs": 65002, + "state": "Established", + "peerState": "OK", + }, + }, + "totalPeers": 1, + } + + return topotest.json_cmp(output, expected) + + test_func = functools.partial( + _bgp_converge, + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Can't converge initial state" + + def _bgp_with_table_map(): + output = json.loads(r1.vtysh_cmd("show ip fib json")) + expected = { + "10.0.0.1/32": [], + "10.0.0.2/32": None, + } + + return topotest.json_cmp(output, expected) + + test_func = functools.partial( + _bgp_with_table_map, + ) + _, result = topotest.run_and_expect(test_func, None, count=20, wait=1) + assert result is None, "Should contain only one of two shared networks" + + # + # Unset table-map + # + r1.vtysh_cmd( + """ + configure terminal + router bgp 65001 + address-family ipv4 unicast + no table-map TableMap + """ + ) + + def _bgp_without_table_map(): + output = json.loads(r1.vtysh_cmd("show ip fib json")) + expected = { + "10.0.0.1/32": [], + "10.0.0.2/32": [], + } + + return topotest.json_cmp(output, expected) + + test_func = functools.partial( + _bgp_without_table_map, + ) + _, result = topotest.run_and_expect(test_func, None, count=20, wait=1) + assert result is None, "Shouldn't contain both shared routes" + + # + # Reset table-map + # + r1.vtysh_cmd( + """ + configure terminal + router bgp 65001 + address-family ipv4 
unicast + table-map TableMap + """ + ) + + test_func = functools.partial( + _bgp_with_table_map, + ) + _, result = topotest.run_and_expect(test_func, None, count=20, wait=1) + assert result is None, "Should contain only one of two shared networks" + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_vpnv4_import_allowas_in_between_vrf/__init__.py b/tests/topotests/bgp_vpnv4_import_allowas_in_between_vrf/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_vpnv4_import_allowas_in_between_vrf/__init__.py diff --git a/tests/topotests/bgp_vpnv4_import_allowas_in_between_vrf/r1/frr.conf b/tests/topotests/bgp_vpnv4_import_allowas_in_between_vrf/r1/frr.conf new file mode 100644 index 0000000000..428b1d992f --- /dev/null +++ b/tests/topotests/bgp_vpnv4_import_allowas_in_between_vrf/r1/frr.conf @@ -0,0 +1,35 @@ +! +interface r1-eth0 + ip address 192.168.179.4/24 +exit +! +router bgp 65001 +! +router bgp 65001 vrf CUSTOMER-A + bgp router-id 192.168.179.4 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 192.168.179.5 remote-as external +! + address-family ipv4 unicast + neighbor 192.168.179.5 next-hop-self + neighbor 192.168.179.5 allowas-in 10 + label vpn export auto + rd vpn export 100:1 + rt vpn both 100:1 100:2 + export vpn + import vpn + exit-address-family +! +router bgp 65001 vrf CUSTOMER-B + bgp router-id 192.168.0.1 + no bgp ebgp-requires-policy + no bgp network import-check +! + address-family ipv4 unicast + label vpn export auto + rd vpn export 100:2 + rt vpn import 100:1 100:2 + export vpn + import vpn + exit-address-family diff --git a/tests/topotests/bgp_vpnv4_import_allowas_in_between_vrf/r2/frr.conf b/tests/topotests/bgp_vpnv4_import_allowas_in_between_vrf/r2/frr.conf new file mode 100644 index 0000000000..58e63d6cf0 --- /dev/null +++ b/tests/topotests/bgp_vpnv4_import_allowas_in_between_vrf/r2/frr.conf @@ -0,0 +1,48 @@ +! +interface lo + ip address 10.10.10.10/32 +! +interface r2-eth0 + ip address 192.168.179.5/24 +exit +! +interface r2-eth1 + ip address 192.168.2.2/24 +exit +! +router bgp 65002 +! +router bgp 65002 vrf CUSTOMER-A + bgp router-id 192.168.179.5 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 192.168.179.4 remote-as external +! + address-family ipv4 unicast + neighbor 192.168.179.4 next-hop-self + neighbor 192.168.179.4 route-map r1 out + label vpn export auto + rd vpn export 100:1 + rt vpn import 100:1 100:2 + export vpn + import vpn + exit-address-family +! +router bgp 65002 vrf CUSTOMER-B + bgp router-id 192.168.0.2 + no bgp ebgp-requires-policy + no bgp network import-check +! + address-family ipv4 unicast + redistribute connected + network 10.10.10.10/32 + label vpn export auto + rd vpn export 100:2 + rt vpn both 100:2 + export vpn + import vpn + exit-address-family +! +route-map r1 permit 10 + set as-path prepend 65001 +! 
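+! prepending 65001 towards r1 puts r1's own AS in the received path, so the
+! route is only accepted thanks to "allowas-in 10" on r1's CUSTOMER-A VRF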
diff --git a/tests/topotests/bgp_vpnv4_import_allowas_in_between_vrf/test_bgp_vpnv4_import_allowas_in_between_vrf.py b/tests/topotests/bgp_vpnv4_import_allowas_in_between_vrf/test_bgp_vpnv4_import_allowas_in_between_vrf.py new file mode 100644 index 0000000000..23325c7a17 --- /dev/null +++ b/tests/topotests/bgp_vpnv4_import_allowas_in_between_vrf/test_bgp_vpnv4_import_allowas_in_between_vrf.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# Copyright (c) 2024 by +# Donatas Abraitis <donatas@opensourcerouting.org> +# + +import os +import sys +import json +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, get_topogen + +pytestmark = [pytest.mark.bgpd] + + +def build_topo(tgen): + tgen.add_router("r1") + tgen.add_router("r2") + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r1"]) + + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r2"]) + + +def setup_module(mod): + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + r1 = tgen.gears["r1"] + r2 = tgen.gears["r2"] + + r1.run("ip link add CUSTOMER-A type vrf table 1001") + r1.run("ip link set up dev CUSTOMER-A") + r1.run("ip link set r1-eth0 master CUSTOMER-A") + + r1.run("ip link add CUSTOMER-B type vrf table 1002") + r1.run("ip link set up dev CUSTOMER-B") + r1.run("ip link set r1-eth1 master CUSTOMER-B") + + r2.run("ip link add CUSTOMER-A type vrf table 1001") + r2.run("ip link set up dev CUSTOMER-A") + r2.run("ip link set r2-eth0 master CUSTOMER-A") + + r2.run("ip link add CUSTOMER-B type vrf table 1002") + r2.run("ip link set up dev CUSTOMER-B") + r2.run("ip link set r2-eth1 master CUSTOMER-B") + + router_list = tgen.routers() + + for _, (rname, router) in enumerate(router_list.items(), 1): + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_vpnv4_import_allowas_in_between_vrf(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + + def _bgp_converge(): + output = json.loads( + r1.vtysh_cmd("show bgp vrf CUSTOMER-A ipv4 unicast 10.10.10.10/32 json") + ) + expected = { + "paths": [ + { + "aspath": { + "string": "65002 65001", + }, + "valid": True, + } + ] + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_converge) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Failed to see 10.10.10.10/32 with a valid next-hop" + + def _vrf_route_imported_to_vrf(): + output = json.loads( + r1.vtysh_cmd("show ip route vrf CUSTOMER-B 10.10.10.10/32 json") + ) + expected = { + "10.10.10.10/32": [ + { + "protocol": "bgp", + "vrfName": "CUSTOMER-B", + "selected": True, + "installed": True, + "table": 1002, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": True, + "ip": "192.168.179.5", + "afi": "ipv4", + "interfaceName": "r1-eth0", + "vrf": "CUSTOMER-A", + "active": True, + } + ], + } + ] + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_vrf_route_imported_to_vrf) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert ( + result is 
None + ), "Failed to see 10.10.10.10/32 to be imported into CUSTOMER-B VRF (Zebra)" + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_advertised_10_125_0_2.json b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_advertised_10_125_0_2.json new file mode 100644 index 0000000000..7891982653 --- /dev/null +++ b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_advertised_10_125_0_2.json @@ -0,0 +1,105 @@ +{ + "bgpLocalRouterId":"192.0.2.1", + "defaultLocPrf":100, + "localAS":65500, + "advertisedRoutes":{ + "192.0.2.1:1":{ + "rd":"192.0.2.1:1", + "10.101.0.0/24":{ + "prefix":"10.101.0.0/24", + "advertisedTo":{ + "10.125.0.2":{ + "hostname":"r2" + } + }, + "paths":[{ + "aspath":{ + "string":"Local", + "segments":[], + "length":0 + }, + "nhVrfName":"vrf1", + "announceNexthopSelf":true, + "origin":"incomplete", + "metric":0, + "locPrf":100, + "weight":32768, + "valid":true, + "sourced":true, + "local":true, + "bestpath":{ + "overall":true, + "selectionReason":"First path received" + }, + "extendedCommunity":{ + "string":"RT:192.0.2.1:100" + }, + "originatorId":"192.0.2.1", + "remoteLabel":101, + "nexthops":[{ + "ip":"0.0.0.0", + "hostname":"r1", + "afi":"ipv4", + "metric":0, + "accessible":true, + "used":true + }], + "peer":{ + "peerId":"0.0.0.0", + "routerId":"192.0.2.1" + } + }] + } + }, + "192.0.2.1:3":{ + "rd":"192.0.2.1:3", + "10.103.0.0/24":{ + "prefix":"10.103.0.0/24", + "advertisedTo":{ + "10.125.0.2":{ + "hostname":"r2" + } + }, + "paths":[{ + "aspath":{ + "string":"Local", + "segments":[], + "length":0 + }, + "nhVrfName":"vrf3", + "announceNexthopSelf":true, + "origin":"incomplete", + "metric":0, + "locPrf":100, + "weight":32768, + "valid":true, + "sourced":true, + "local":true, + "bestpath":{ + "overall":true, + "selectionReason":"First path received" + }, + "extendedCommunity":{ + "string":"RT:192.0.2.1:300" + }, + "originatorId":"192.0.2.1", + "remoteLabel":103, + "nexthops":[{ + "ip":"0.0.0.0", + "hostname":"r1", + "afi":"ipv4", + "metric":0, + "accessible":true, + "used":true + }], + "peer":{ + "peerId":"0.0.0.0", + "routerId":"192.0.2.1" + } + }] + } + } + }, + "totalPrefixCounter":2, + "filteredPrefixCounter":0 +} diff --git a/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py b/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py index ee84e375fb..ada37c28c1 100644 --- a/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py +++ b/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py @@ -218,6 +218,29 @@ def check_show_bgp_ipv4_vpn(rname, json_file): assert result is None, assertmsg +def check_show_bgp_ipv4_vpn_peer_advertised_routes(rname, peer, json_file): + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + router = tgen.gears[rname] + + logger.info( + "Checking VPNv4 advertised routes for on {} for peer {}".format(rname, peer) + ) + + json_file = "{}/{}/{}".format(CWD, router.name, json_file) + expected = json.loads(open(json_file).read()) + test_func = partial( + topotest.router_json_cmp, + router, + "show bgp ipv4 vpn neighbors {} advertised-routes detail json".format(peer), + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=20, wait=1) + assertmsg = '"{}" JSON output mismatches'.format(router.name) + assert result is None, assertmsg + + def check_show_bgp_vrf_ipv4(rname, json_file): tgen = get_topogen() if tgen.routers_have_failure(): @@ -563,6 +586,21 @@ router 
bgp 65500 check_show_bgp_vrf_ipv4(rname, "ipv4_vrf_all_routes_init.json") +def test_bgp_advertised_routes_step13(): + """ + Dump advertised routes from r1 to r2 + Check that the localpref attribute is set on the show command + """ + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + check_show_bgp_ipv4_vpn_peer_advertised_routes( + "r1", "10.125.0.2", "ipv4_vpn_routes_advertised_10_125_0_2.json" + ) + + def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() diff --git a/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py b/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py index 028bc35358..4aa4404134 100644 --- a/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py +++ b/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py @@ -94,11 +94,9 @@ def setup_module(module): router.net.set_intf_netns("r1-eth0", ns, up=True) # run daemons - router.load_config(TopoRouter.RD_MGMTD, None, "--vrfwnetns") + router.use_netns_vrf() router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, "{}/zebra.conf".format("r1")), - "--vrfwnetns", + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format("r1")) ) router.load_config( TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format("r1")) diff --git a/tests/topotests/conftest.py b/tests/topotests/conftest.py index dafd19c283..117ff74e43 100755 --- a/tests/topotests/conftest.py +++ b/tests/topotests/conftest.py @@ -31,7 +31,7 @@ from lib import topolog, topotest try: # Used by munet native tests - from munet.testing.fixtures import unet # pylint: disable=all # noqa + from munet.testing.fixtures import stepf, unet # pylint: disable=all # noqa @pytest.fixture(scope="module") def rundir_module(pytestconfig): diff --git a/tests/topotests/grpc_basic/test_basic_grpc.py b/tests/topotests/grpc_basic/test_basic_grpc.py index 5ff2894fd1..e82436c39e 100644 --- a/tests/topotests/grpc_basic/test_basic_grpc.py +++ b/tests/topotests/grpc_basic/test_basic_grpc.py @@ -108,7 +108,7 @@ def test_capabilities(tgen): logging.debug("grpc output: %s", output) modules = sorted(re.findall('name: "([^"]+)"', output)) - expected = ["frr-interface", "frr-routing", "frr-staticd", "frr-vrf"] + expected = ["frr-backend", "frr-interface", "frr-routing", "frr-staticd", "frr-vrf"] assert modules == expected encodings = sorted(re.findall("supported_encodings: (.*)", output)) @@ -145,15 +145,10 @@ def test_get_config(tgen): "ip": "192.168.1.1", "prefix-length": 24 } - ], - "evpn-mh": {}, - "ipv6-router-advertisements": {} + ] } } ] - }, - "frr-zebra:zebra": { - "import-kernel-table": {} } } """ ) diff --git a/tests/topotests/isis_srv6_topo1/test_isis_srv6_topo1.py b/tests/topotests/isis_srv6_topo1/test_isis_srv6_topo1.py index 9c1a23f54f..d17b4702f7 100644 --- a/tests/topotests/isis_srv6_topo1/test_isis_srv6_topo1.py +++ b/tests/topotests/isis_srv6_topo1/test_isis_srv6_topo1.py @@ -245,7 +245,7 @@ def check_ping6(name, dest_addr, expect_connected): if match not in output: return "ping fail" - match = "{} packet loss".format("0%" if expect_connected else "100%") + match = "{} packet loss".format(", 0%" if expect_connected else ", 100%") logger.info("[+] check {} {} {}".format(name, dest_addr, match)) tgen = get_topogen() func = functools.partial(_check, name, dest_addr, match) @@ -333,7 +333,7 @@ def test_ping_step1(): # Setup encap route on rt1, decap route on rt2 tgen.gears["rt1"].vtysh_cmd( - "sharp install seg6-routes fc00:0:9::1 nexthop-seg6 2001:db8:1::2 encap fc00:0:1:2:6:f00d:: 1" + 
"sharp install seg6-routes fc00:0:9::1 nexthop-seg6 2001:db8:1::2 encap fc00:0:2:6:f00d:: 1" ) tgen.gears["rt6"].vtysh_cmd( "sharp install seg6local-routes fc00:0:f00d:: nexthop-seg6local eth-dst End_DT6 254 1" @@ -443,7 +443,8 @@ def test_ping_step2(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - check_ping6("rt1", "fc00:0:9::1", False) + # ping should pass because route to fc00:0:2:6:f00d:: is still valid + check_ping6("rt1", "fc00:0:9::1", True) # @@ -643,7 +644,8 @@ def test_ping_step4(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - check_ping6("rt1", "fc00:0:9::1", False) + # ping should pass because route to fc00:0:2:6:f00d:: is still valid + check_ping6("rt1", "fc00:0:9::1", True) # @@ -838,7 +840,8 @@ def test_ping_step6(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - check_ping6("rt1", "fc00:0:9::1", False) + # ping should pass because route to fc00:0:2:6:f00d:: is still valid + check_ping6("rt1", "fc00:0:9::1", True) # @@ -1033,7 +1036,8 @@ def test_ping_step8(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - check_ping6("rt1", "fc00:0:9::1", False) + # ping should pass because route to fc00:0:2:6:f00d:: is still valid + check_ping6("rt1", "fc00:0:9::1", True) # diff --git a/tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py b/tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py index ea404beae4..db2657e523 100644 --- a/tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py +++ b/tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py @@ -107,6 +107,11 @@ def build_topo(tgen): def setup_module(mod): "Sets up the pytest environment" + + if not os.path.isfile("/usr/sbin/snmpd"): + error_msg = "SNMP not installed - skipping" + pytest.skip(error_msg) + tgen = Topogen(build_topo, mod.__name__) tgen.start_topology() diff --git a/tests/topotests/lib/bmp_collector/bgp/update/rd.py b/tests/topotests/lib/bmp_collector/bgp/update/rd.py index 3f08de5ae9..d44060bf2f 100644 --- a/tests/topotests/lib/bmp_collector/bgp/update/rd.py +++ b/tests/topotests/lib/bmp_collector/bgp/update/rd.py @@ -4,6 +4,7 @@ # Authored by Farid Mihoub <farid.mihoub@6wind.com> # import ipaddress +import socket import struct @@ -45,9 +46,11 @@ class RouteDistinguisher: self.repr_str = f"{self.as_number}:{self.assigned_sp}" elif rd_type == 1: - (self.admin_ipv4, self.assigned_sp) = struct.unpack_from("!IH", self.rd[2:]) - ipv4 = str(ipaddress.IPv4Address(self.admin_ipv4)) - self.repr_str = f"{self.as_number}:{self.assigned_sp}" + (self.admin_ipv4, self.assigned_sp) = struct.unpack_from( + "!4sH", self.rd[2:] + ) + ipv4_str = socket.inet_ntoa(self.admin_ipv4) + self.repr_str = f"{ipv4_str}:{self.assigned_sp}" elif rd_type == 2: (self.four_bytes_as, self.assigned_sp) = struct.unpack_from( diff --git a/tests/topotests/lib/fe_client.py b/tests/topotests/lib/fe_client.py index 784f7d17eb..078df8cb33 100755 --- a/tests/topotests/lib/fe_client.py +++ b/tests/topotests/lib/fe_client.py @@ -78,8 +78,13 @@ GET_DATA_FLAG_STATE = 0x1 GET_DATA_FLAG_CONFIG = 0x2 GET_DATA_FLAG_EXACT = 0x4 -MSG_NOTIFY_FMT = "=B7x" +MSG_NOTIFY_FMT = "=BB6x" NOTIFY_FIELD_RESULT_TYPE = 0 +NOTIFY_FIELD_OP = 1 +NOTIFY_OP_NOTIFICATION = 0 +NOTIFY_OP_REPLACE = 1 +NOTIFY_OP_DELETE = 2 +NOTIFY_OP_PATCH = 3 MSG_NOTIFY_SELECT_FMT = "=B7x" @@ -363,10 +368,12 @@ class Session: raise Exception(f"Received NON-NOTIFY Message: {mfixed}: {mdata}") vsplit = mhdr[HDR_FIELD_VSPLIT] + result_type = mfixed[0] + op = mfixed[1] assert mdata[vsplit - 1] == 0 assert mdata[-1] == 0 - # xpath = mdata[: vsplit - 1].decode("utf-8") - return 
mdata[vsplit:-1].decode("utf-8") + xpath = mdata[: vsplit - 1].decode("utf-8") + return result_type, op, xpath, mdata[vsplit:-1].decode("utf-8") else: raise TimeoutError("Timeout waiting for notifications") @@ -390,6 +397,9 @@ def __parse_args(): "-c", "--config-only", action="store_true", help="return config only" ) parser.add_argument( + "--datastore", action="store_true", help="listen for datastore notifications" + ) + parser.add_argument( "-q", "--query", nargs="+", metavar="XPATH", help="xpath[s] to query" ) parser.add_argument("-s", "--server", default=MPATH, help="path to server socket") @@ -434,9 +444,31 @@ def __main(): if args.listen is not None: i = args.notify_count + if args.listen: + sess.add_notify_select(True, args.listen) while i > 0 or args.notify_count == 0: - notif = sess.recv_notify(args.listen) - print(notif) + result_type, op, xpath, notif = sess.recv_notify() + if op == NOTIFY_OP_NOTIFICATION: + if args.datastore: + logging.warning("ignoring non-datastore notification: %s", notif) + else: + print(notif) + elif not args.datastore: + logging.warning( + "ignoring datastore notification op: %s xpath: %s data: %s", + op, + xpath, + notif, + ) + elif op == NOTIFY_OP_PATCH: + print(f"#OP=PATCH: {xpath}") + print(notif) + elif op == NOTIFY_OP_REPLACE: + print(f"#OP=REPLACE: {xpath}") + print(notif) + elif op == NOTIFY_OP_DELETE: + print(f"#OP=DELETE: {xpath}") + assert len(notif) == 0 i -= 1 diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py index 0a9a84a4bb..8b1bd6e1a5 100644 --- a/tests/topotests/lib/topogen.py +++ b/tests/topotests/lib/topogen.py @@ -819,6 +819,12 @@ class TopoRouter(TopoGear): gear += " TopoRouter<>" return gear + def use_netns_vrf(self): + """ + Use netns as VRF backend. + """ + self.net.useNetnsVRF() + def check_capability(self, daemon, param): """ Checks a capability daemon against an argument option diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py index ca6723aecb..e2c70cdccd 100644 --- a/tests/topotests/lib/topotest.py +++ b/tests/topotests/lib/topotest.py @@ -1467,6 +1467,7 @@ class Router(Node): self.daemons_options = {"zebra": ""} self.reportCores = True self.version = None + self.use_netns_vrf = False self.ns_cmd = "sudo nsenter -a -t {} ".format(self.pid) try: @@ -1622,6 +1623,9 @@ class Router(Node): # breakpoint() # assert False, "can't remove IPs %s" % str(ex) + def useNetnsVRF(self): + self.use_netns_vrf = True + def checkCapability(self, daemon, param): if param is not None: daemon_path = os.path.join(self.daemondir, daemon) @@ -1908,6 +1912,8 @@ class Router(Node): def start_daemon(daemon, instance=None): daemon_opts = self.daemons_options.get(daemon, "") + if self.use_netns_vrf: + daemon_opts += " -w" # get pid and vty filenames and remove the files m = re.match(r"(.* |^)-n (\d+)( ?.*|$)", daemon_opts) diff --git a/tests/topotests/mgmt_notif/r1/frr.conf b/tests/topotests/mgmt_notif/r1/frr.conf index 47e73956cf..36981c94d3 100644 --- a/tests/topotests/mgmt_notif/r1/frr.conf +++ b/tests/topotests/mgmt_notif/r1/frr.conf @@ -4,7 +4,7 @@ log file frr.log no debug memstats-at-exit debug northbound notifications -debug northbound libyang +!! 
debug northbound libyang debug northbound events debug northbound callbacks diff --git a/tests/topotests/mgmt_notif/r2/frr.conf b/tests/topotests/mgmt_notif/r2/frr.conf index cd052011e0..540961a0e0 100644 --- a/tests/topotests/mgmt_notif/r2/frr.conf +++ b/tests/topotests/mgmt_notif/r2/frr.conf @@ -16,7 +16,7 @@ ip route 22.22.22.22/32 lo interface r2-eth0 ip address 1.1.1.2/24 - ip rip authentication string bar + ip rip authentication string foo ip rip authentication mode text exit diff --git a/tests/topotests/mgmt_notif/test_ds_notify.py b/tests/topotests/mgmt_notif/test_ds_notify.py new file mode 100644 index 0000000000..1759bf8df8 --- /dev/null +++ b/tests/topotests/mgmt_notif/test_ds_notify.py @@ -0,0 +1,238 @@ +# -*- coding: utf-8 eval: (blacken-mode 1) -*- +# +# January 14 2025, Christian Hopps <chopps@labn.net> +# +# Copyright (c) 2025, LabN Consulting, L.L.C. +# +""" +Test YANG Datastore Notifications +""" +import json +import logging +import os +import re +import time + +import pytest +from lib.topogen import Topogen +from lib.topotest import json_cmp +from munet.testing.util import waitline +from oper import check_kernel_32 + +pytestmark = [pytest.mark.ripd, pytest.mark.staticd, pytest.mark.mgmtd] + +CWD = os.path.dirname(os.path.realpath(__file__)) +FE_CLIENT = CWD + "/../lib/fe_client.py" + + +@pytest.fixture(scope="module") +def tgen(request): + "Setup/Teardown the environment and provide tgen argument to tests" + + topodef = { + "s1": ("r1", "r2"), + } + + tgen = Topogen(topodef, request.module.__name__) + tgen.start_topology() + + router_list = tgen.routers() + for _, router in router_list.items(): + router.load_frr_config("frr.conf") + + tgen.start_router() + yield tgen + tgen.stop_topology() + + +def get_op_and_json(output): + op = "" + path = "" + data = "" + for line in output.split("\n"): + if not line: + continue + if not op: + m = re.match("#OP=([A-Z]*): (.*)", line) + if m: + op = m.group(1) + path = m.group(2) + continue + data += line + "\n" + if not op: + assert False, f"No notifcation op present in:\n{output}" + return op, path, data + + +def test_frontend_datastore_notification(tgen): + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"].net + + check_kernel_32(r1, "11.11.11.11", 1, "") + + rc, _, _ = r1.cmd_status(FE_CLIENT + " --help") + + if rc: + pytest.skip("No protoc or present cannot run test") + + # Start our FE client in the background + p = r1.popen( + [FE_CLIENT, "--datastore", "--listen=/frr-interface:lib/interface/state"] + ) + assert waitline(p.stderr, "Connected", timeout=10) + + r1.cmd_raises("ip link set r1-eth0 mtu 1200") + + # {"frr-interface:lib":{"interface":[{"name":"r1-eth0","state":{"if-index":2,"mtu":1200,"mtu6":1200,"speed":10000,"metric":0,"phy-address":"ba:fd:de:b5:8b:90"}}]}} + + try: + # Wait for FE client to exit + output, error = p.communicate(timeout=10) + op, path, data = get_op_and_json(output) + + assert op == "REPLACE" + assert path.startswith("/frr-interface:lib/interface[name='r1-eth0']/state") + + jsout = json.loads(data) + expected = json.loads( + '{"frr-interface:lib":{"interface":[{"name":"r1-eth0","state":{"mtu":1200}}]}}' + ) + result = json_cmp(jsout, expected) + assert result is None + finally: + p.kill() + r1.cmd_raises("ip link set r1-eth0 mtu 1500") + + +def test_backend_datastore_update(tgen): + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"].net + + check_kernel_32(r1, "11.11.11.11", 1, "") + + be_client_path = "/usr/lib/frr/mgmtd_testc" + 
rc, _, _ = r1.cmd_status(be_client_path + " --help") + + if rc: + pytest.skip("No mgmtd_testc") + + # Start our BE client in the background + p = r1.popen( + [ + be_client_path, + "--timeout=20", + "--log=file:/dev/stderr", + "--datastore", + "--listen", + "/frr-interface:lib/interface", + ] + ) + assert waitline(p.stderr, "Got SUBSCR_REPLY success 1", timeout=10) + + r1.cmd_raises("ip link set r1-eth0 mtu 1200") + try: + expected = json.loads( + '{"frr-interface:lib":{"interface":[{"name":"r1-eth0","state":{"mtu":1200}}]}}' + ) + + output, error = p.communicate(timeout=10) + op, path, data = get_op_and_json(output) + jsout = json.loads(data) + result = json_cmp(jsout, expected) + assert result is None + finally: + p.kill() + r1.cmd_raises("ip link set r1-eth0 mtu 1500") + + +def test_backend_datastore_add_delete(tgen): + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"].net + + check_kernel_32(r1, "11.11.11.11", 1, "") + + be_client_path = "/usr/lib/frr/mgmtd_testc" + rc, _, _ = r1.cmd_status(be_client_path + " --help") + + if rc: + pytest.skip("No mgmtd_testc") + + # Start our BE client in the background + p = r1.popen( + [ + be_client_path, + "--timeout=20", + "--log=file:/dev/stderr", + "--notify-count=2", + "--datastore", + "--listen", + "/frr-interface:lib/interface", + ] + ) + assert waitline(p.stderr, "Got SUBSCR_REPLY success 1", timeout=10) + + r1.cmd_raises('vtysh -c "conf t" -c "int foobar"') + try: + assert waitline( + p.stdout, + re.escape('#OP=REPLACE: /frr-interface:lib/interface[name="foobar"]/state'), + timeout=2, + ) + + r1.cmd_raises('vtysh -c "conf t" -c "no int foobar"') + assert waitline( + p.stdout, + re.escape('#OP=DELETE: /frr-interface:lib/interface[name="foobar"]/state'), + timeout=2, + ) + finally: + p.kill() + r1.cmd_raises('vtysh -c "conf t" -c "no int foobar"') + + +def test_datastore_backend_filters(tgen): + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"].net + + check_kernel_32(r1, "11.11.11.11", 1, "") + + rc, _, _ = r1.cmd_status(FE_CLIENT + " --help") + if rc: + pytest.skip("No protoc or present cannot run test") + + # Start our FE client in the background + p = r1.popen( + [FE_CLIENT, "--datastore", "--listen=/frr-interface:lib/interface/state"] + ) + assert waitline(p.stderr, "Connected", timeout=10) + time.sleep(1) + + try: + output = r1.cmd_raises( + 'vtysh -c "show mgmt get-data /frr-backend:clients/client/state/notify-selectors"' + ) + jsout = json.loads(output) + + # + # Verify only zebra has the notify selector as it's the only provider currently + # + state = {"notify-selectors": ["/frr-interface:lib/interface/state"]} + expected = { + "frr-backend:clients": {"client": [{"name": "zebra", "state": state}]} + } + + result = json_cmp(jsout, expected, exact=True) + assert result is None + except Exception as error: + logging.error("got exception: %s", error) + raise + finally: + p.kill() diff --git a/tests/topotests/mgmt_notif/test_notif.py b/tests/topotests/mgmt_notif/test_notif.py index e5286faae2..f3c7c8bc81 100644 --- a/tests/topotests/mgmt_notif/test_notif.py +++ b/tests/topotests/mgmt_notif/test_notif.py @@ -5,9 +5,8 @@ # # Copyright (c) 2024, LabN Consulting, L.L.C. 
# - """ -Test YANG Notifications +Test Traditional YANG Notifications """ import json import os @@ -50,33 +49,101 @@ def test_frontend_notification(tgen): check_kernel_32(r1, "11.11.11.11", 1, "") - fe_client_path = CWD + "/../lib/fe_client.py --verbose" + fe_client_path = CWD + "/../lib/fe_client.py" rc, _, _ = r1.cmd_status(fe_client_path + " --help") if rc: pytest.skip("No protoc or present cannot run test") - # The first notifications is a frr-ripd:authentication-type-failure - # So we filter to avoid that, all the rest are frr-ripd:authentication-failure - # making our test deterministic - output = r1.cmd_raises( - fe_client_path + " --listen /frr-ripd:authentication-failure" - ) - jsout = json.loads(output) + # Update config to non-matching authentication. + conf = """ + conf t + interface r1-eth0 + ip rip authentication string bar + """ + r1.cmd_raises("vtysh", stdin=conf) + + try: + output = r1.cmd_raises( + fe_client_path + " --listen /frr-ripd:authentication-failure" + ) + + jsout = json.loads(output) + expected = {"frr-ripd:authentication-failure": {"interface-name": "r1-eth0"}} + result = json_cmp(jsout, expected) + assert result is None + + output = r1.cmd_raises( + fe_client_path + " --use-protobuf --listen /frr-ripd:authentication-failure" + ) + jsout = json.loads(output) + expected = {"frr-ripd:authentication-failure": {"interface-name": "r1-eth0"}} + result = json_cmp(jsout, expected) + assert result is None + finally: + # Update config to matching authentication. + conf = """ + conf t + interface r1-eth0 + ip rip authentication string foo + """ + r1.cmd_raises("vtysh", stdin=conf) + + +def test_frontend_all_notification(tgen): + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) - expected = {"frr-ripd:authentication-failure": {"interface-name": "r1-eth0"}} - result = json_cmp(jsout, expected) - assert result is None + r1 = tgen.gears["r1"].net - output = r1.cmd_raises(fe_client_path + " --use-protobuf --listen") - jsout = json.loads(output) + check_kernel_32(r1, "11.11.11.11", 1, "") - expected = {"frr-ripd:authentication-failure": {"interface-name": "r1-eth0"}} - result = json_cmp(jsout, expected) - assert result is None + fe_client_path = CWD + "/../lib/fe_client.py" + rc, _, _ = r1.cmd_status(fe_client_path + " --help") + if rc: + pytest.skip("No protoc or present cannot run test") -def test_backend_notification(tgen): + # Update config to non-matching authentication. + conf = """ + conf t + interface r1-eth0 + ip rip authentication string bar + """ + r1.cmd_raises("vtysh", stdin=conf) + + try: + # The first notifications is a frr-ripd:authentication-type-failure + # All the rest are frr-ripd:authentication-failure so we check for both. + output = r1.cmd_raises(fe_client_path + " --listen /") + jsout = json.loads(output) + expected = { + "frr-ripd:authentication-type-failure": {"interface-name": "r1-eth0"} + } + result = json_cmp(jsout, expected) + if result is not None: + expected = { + "frr-ripd:authentication-failure": {"interface-name": "r1-eth0"} + } + result = json_cmp(jsout, expected) + assert result is None + + output = r1.cmd_raises(fe_client_path + " --use-protobuf --listen /") + jsout = json.loads(output) + expected = {"frr-ripd:authentication-failure": {"interface-name": "r1-eth0"}} + result = json_cmp(jsout, expected) + assert result is None + finally: + # Update config to matching authentication. 
+ conf = """ + conf t + interface r1-eth0 + ip rip authentication string foo + """ + r1.cmd_raises("vtysh", stdin=conf) + + +def test_backend_yang_notification(tgen): if tgen.routers_have_failure(): pytest.skip(tgen.errors) @@ -90,12 +157,28 @@ def test_backend_notification(tgen): if rc: pytest.skip("No mgmtd_testc") - output = r1.cmd_raises( - be_client_path + " --timeout 20 --log file:mgmt_testc.log --listen /frr-ripd" - ) - - jsout = json.loads(output) - - expected = {"frr-ripd:authentication-failure": {"interface-name": "r1-eth0"}} - result = json_cmp(jsout, expected) - assert result is None + # Update config to non-matching authentication. + conf = """ + conf t + interface r1-eth0 + ip rip authentication string bar + """ + r1.cmd_raises("vtysh", stdin=conf) + + try: + output = r1.cmd_raises( + be_client_path + + " --timeout 20 --log file:mgmt_testc.log --listen /frr-ripd" + ) + jsout = json.loads(output) + expected = {"frr-ripd:authentication-failure": {"interface-name": "r1-eth0"}} + result = json_cmp(jsout, expected) + assert result is None + finally: + # Update config to matching authentication. + conf = """ + conf t + interface r1-eth0 + ip rip authentication string foo + """ + r1.cmd_raises("vtysh", stdin=conf) diff --git a/tests/topotests/mgmt_oper/oper.py b/tests/topotests/mgmt_oper/oper.py index bca452d011..6e1866382b 100644 --- a/tests/topotests/mgmt_oper/oper.py +++ b/tests/topotests/mgmt_oper/oper.py @@ -62,7 +62,7 @@ def disable_debug(router): router.vtysh_cmd("no debug northbound callbacks configuration") -@retry(retry_timeout=30, initial_wait=1) +@retry(retry_timeout=30, initial_wait=0.1) def _do_oper_test(tgen, qr, seconds_left=None): r1 = tgen.gears["r1"].net @@ -113,6 +113,7 @@ def _do_oper_test(tgen, qr, seconds_left=None): "-------DIFF---------\n%s\n---------DIFF----------", pprint.pformat(cmpout), ) + cmpout = str(cmpout) else: cmpout = tt_json_cmp(ojson, ejson, exact=True) if cmpout and ejson_alt is not None: @@ -186,6 +187,20 @@ def addrgen(a, count, step=1): @retry(retry_timeout=30, initial_wait=0.1) +def check_kernel_net(r1, net, vrf): + addr = ipaddress.ip_network(net) + vrfstr = f" vrf {vrf}" if vrf else "" + if addr.version == 6: + kernel = r1.cmd_raises(f"ip -6 route show{vrfstr}") + else: + kernel = r1.cmd_raises(f"ip -4 route show{vrfstr}") + + nentries = len(re.findall("\n", kernel)) + logging.info("checking kernel routing table%s: (%s entries)", vrfstr, nentries) + assert str(net) in kernel, f"Failed to find '{net}' in {nentries} entries" + + +@retry(retry_timeout=30, initial_wait=0.1) def check_kernel_32(r1, start_addr, count, vrf, step=1): start = ipaddress.ip_address(start_addr) vrfstr = f" vrf {vrf}" if vrf else "" diff --git a/tests/topotests/mgmt_oper/test_oper.py b/tests/topotests/mgmt_oper/test_oper.py index 23529bc75e..0d346b5b7c 100644 --- a/tests/topotests/mgmt_oper/test_oper.py +++ b/tests/topotests/mgmt_oper/test_oper.py @@ -15,7 +15,7 @@ import math import pytest from lib.topogen import Topogen -from oper import check_kernel_32, do_oper_test +from oper import check_kernel_32, check_kernel_net, do_oper_test pytestmark = [pytest.mark.staticd, pytest.mark.mgmtd] @@ -85,10 +85,17 @@ def test_oper(tgen): ] r1 = tgen.gears["r1"].net + check_kernel_32(r1, "11.11.11.11", 1, "") check_kernel_32(r1, "12.12.12.12", 1, "") check_kernel_32(r1, "13.13.13.13", 1, "red") check_kernel_32(r1, "14.14.14.14", 1, "red") + + check_kernel_net(r1, "2001:1111::/64", "") + check_kernel_net(r1, "2002:2222::/64", "") + check_kernel_net(r1, "2003:333::/64", "red") + 
check_kernel_net(r1, "2004:4444::/64", "red") + do_oper_test(tgen, query_results) diff --git a/tests/topotests/mgmt_tests/test_yang_mgmt.py b/tests/topotests/mgmt_tests/test_yang_mgmt.py index 52f6ba4db7..7b74eab6b7 100644 --- a/tests/topotests/mgmt_tests/test_yang_mgmt.py +++ b/tests/topotests/mgmt_tests/test_yang_mgmt.py @@ -181,7 +181,7 @@ def test_mgmt_commit_check(request): raw_config = { "r1": { "raw_config": [ - "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.2/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec", + "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.2/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec", "mgmt commit check", ] } @@ -194,7 +194,7 @@ def test_mgmt_commit_check(request): raw_config = { "r1": { "raw_config": [ - "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.2/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec", + "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.2/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec", "mgmt commit check", ] } @@ -245,7 +245,7 @@ def test_mgmt_commit_apply(request): raw_config = { "r1": { "raw_config": [ - "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.20/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default", + "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.20/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default", "mgmt commit apply", ] } @@ -258,7 +258,7 @@ def test_mgmt_commit_apply(request): raw_config = { "r1": { "raw_config": [ - "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.20/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default", + "mgmt set-config 
/frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.20/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default", "mgmt commit apply", ] } @@ -298,7 +298,7 @@ def test_mgmt_commit_abort(request): raw_config = { "r1": { "raw_config": [ - "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.3/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default", + "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.3/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default", "mgmt commit abort", ] } @@ -350,7 +350,7 @@ def test_mgmt_delete_config(request): raw_config = { "r1": { "raw_config": [ - "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.168.1.3/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default", + "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.168.1.3/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default", "mgmt commit apply", ] } @@ -381,7 +381,7 @@ def test_mgmt_delete_config(request): raw_config = { "r1": { "raw_config": [ - "mgmt delete-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.168.1.3/32'][afi-safi='frr-routing:ipv4-unicast']", + "mgmt delete-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.168.1.3/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']", "mgmt commit apply", ] } @@ -657,7 +657,7 @@ def test_mgmt_chaos_stop_start_frr(request): raw_config = { "r1": { "raw_config": [ - "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec", + "mgmt set-config 
/frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec", "mgmt commit apply", ] } @@ -689,7 +689,7 @@ def test_mgmt_chaos_stop_start_frr(request): raw_config = { "r1": { "raw_config": [ - "mgmt delete-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][afi-safi='frr-routing:ipv4-unicast']", + "mgmt delete-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']", "mgmt commit apply", ] } @@ -733,7 +733,7 @@ def test_mgmt_chaos_kill_daemon(request): raw_config = { "r1": { "raw_config": [ - "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec", + "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec", "mgmt commit apply", ] } diff --git a/tests/topotests/msdp_topo3/r1/frr.conf b/tests/topotests/msdp_topo3/r1/frr.conf index d5b10bf8a1..161f0008d3 100644 --- a/tests/topotests/msdp_topo3/r1/frr.conf +++ b/tests/topotests/msdp_topo3/r1/frr.conf @@ -27,5 +27,6 @@ router pim msdp originator-id 10.254.254.1 msdp log sa-events msdp peer 192.168.1.2 source 192.168.1.1 + msdp timers 10 20 3 rp 192.168.1.1 !
\ No newline at end of file diff --git a/tests/topotests/msdp_topo3/r2/frr.conf b/tests/topotests/msdp_topo3/r2/frr.conf index 245c061874..b7a20d4b71 100644 --- a/tests/topotests/msdp_topo3/r2/frr.conf +++ b/tests/topotests/msdp_topo3/r2/frr.conf @@ -24,5 +24,6 @@ router bgp 65200 router pim msdp log sa-events msdp peer 192.168.1.1 source 192.168.1.2 + msdp timers 10 20 3 rp 192.168.1.2 !
\ No newline at end of file diff --git a/tests/topotests/msdp_topo3/test_msdp_topo3.py b/tests/topotests/msdp_topo3/test_msdp_topo3.py index 9393ae7ffd..4e3b18f7c6 100644 --- a/tests/topotests/msdp_topo3/test_msdp_topo3.py +++ b/tests/topotests/msdp_topo3/test_msdp_topo3.py @@ -121,6 +121,29 @@ def test_bgp_convergence(): expect_loopback_route("r2", "ip", "10.254.254.1/32", "bgp") +def test_msdp_connect(): + "Test that the MSDP peers have connected." + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + def msdp_is_connected(router, peer): + logger.info(f"waiting MSDP peer {peer} in router {router}") + expected = {peer: {"state": "established"}} + test_func = partial( + topotest.router_json_cmp, + tgen.gears[router], + "show ip msdp peer json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=2) + assertmsg = '"{}" convergence failure'.format(router) + assert result is None, assertmsg + + msdp_is_connected("r1", "192.168.1.2") + msdp_is_connected("r2", "192.168.1.1") + + def test_sa_learn(): """ Test that the learned SA uses the configured originator ID instead @@ -145,10 +168,10 @@ def test_sa_learn(): "local": "no", } } - } + }, ) - _, result = topotest.run_and_expect(test_func, None, count=100, wait=1) - assert result is None, 'r2 SA convergence failure' + _, result = topotest.run_and_expect(test_func, None, count=80, wait=2) + assert result is None, "r2 SA convergence failure" def test_memory_leak(): diff --git a/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py b/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py index 8d91826022..a32b82c7f4 100755 --- a/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py +++ b/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py @@ -638,12 +638,6 @@ def pre_config_for_source_dr_tests( "interfaceName": "r5-r4-eth1", "weight": 1, }, - { - "ip": "10.0.3.1", - "afi": "ipv4", - "interfaceName": "r5-r4-eth1", - "weight": 1, - }, ], } ] diff --git a/tests/topotests/munet/base.py b/tests/topotests/munet/base.py index e77eb15dc8..e9410d442d 100644 --- a/tests/topotests/munet/base.py +++ b/tests/topotests/munet/base.py @@ -332,6 +332,10 @@ class Commander: # pylint: disable=R0904 self.last = None self.exec_paths = {} + # For running commands one time only (deals with asyncio) + self.cmd_once_done = {} + self.cmd_once_locks = {} + if not logger: logname = f"munet.{self.__class__.__name__.lower()}.{name}" self.logger = logging.getLogger(logname) @@ -1189,7 +1193,7 @@ class Commander: # pylint: disable=R0904 return stdout # Run a command in a new window (gnome-terminal, screen, tmux, xterm) - def run_in_window( + def run_in_window( # pylint: disable=too-many-positional-arguments self, cmd, wait_for=False, @@ -1205,7 +1209,7 @@ class Commander: # pylint: disable=R0904 Args: cmd: string to execute. - wait_for: True to wait for exit from command or `str` as channel neme to + wait_for: True to wait for exit from command or `str` as channel name to signal on exit, otherwise False background: Do not change focus to new window. title: Title for new pane (tmux) or window (xterm). 
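The base.py changes here give Commander per-command caches (cmd_once_done / cmd_once_locks, added in the hunk above) and, in the hunk below, async_cmd_raises_once() / cmd_raises_once() built on them so a given command runs only once even when requested concurrently. A minimal standalone sketch of that run-once pattern, assuming a hypothetical run_shell() helper in place of async_cmd_raises() (illustrative only, not munet code):

import asyncio


class OnceRunner:
    """Sketch of the run-once caching idea (illustrative, not munet code)."""

    def __init__(self):
        self.cmd_once_done = {}   # cmd -> cached output of the first run
        self.cmd_once_locks = {}  # cmd -> asyncio.Lock serializing the first run

    async def run_shell(self, cmd):
        # Hypothetical stand-in for Commander.async_cmd_raises().
        proc = await asyncio.create_subprocess_shell(
            cmd, stdout=asyncio.subprocess.PIPE
        )
        out, _ = await proc.communicate()
        return out.decode()

    async def run_once(self, cmd):
        if cmd in self.cmd_once_done:           # fast path: already ran
            return self.cmd_once_done[cmd]
        lock = self.cmd_once_locks.setdefault(cmd, asyncio.Lock())
        async with lock:                        # concurrent callers wait here
            if cmd not in self.cmd_once_done:   # re-check after acquiring the lock
                self.cmd_once_done[cmd] = await self.run_shell(cmd)
        return self.cmd_once_done[cmd]


async def _demo():
    runner = OnceRunner()
    # Both awaits ask for the same command; the shell runs it only once.
    a, b = await asyncio.gather(
        runner.run_once("echo once"), runner.run_once("echo once")
    )
    assert a == b == "once\n"


if __name__ == "__main__":
    asyncio.run(_demo())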
@@ -1405,6 +1409,26 @@ class Commander: # pylint: disable=R0904 return pane_info + async def async_cmd_raises_once(self, cmd, **kwargs): + if cmd in self.cmd_once_done: + return self.cmd_once_done[cmd] + + if cmd not in self.cmd_once_locks: + self.cmd_once_locks[cmd] = asyncio.Lock() + + async with self.cmd_once_locks[cmd]: + if cmd not in self.cmd_once_done: + self.logger.info("Running command once: %s", cmd) + self.cmd_once_done[cmd] = await commander.async_cmd_raises( + cmd, **kwargs + ) + return self.cmd_once_done[cmd] + + def cmd_raises_once(self, cmd, **kwargs): + if cmd not in self.cmd_once_done: + self.cmd_once_done[cmd] = commander.cmd_raises(cmd, **kwargs) + return self.cmd_once_done[cmd] + def delete(self): """Calls self.async_delete within an exec loop.""" asyncio.run(self.async_delete()) diff --git a/tests/topotests/munet/munet-schema.json b/tests/topotests/munet/munet-schema.json index 6ebc368dcb..44453cb44f 100644 --- a/tests/topotests/munet/munet-schema.json +++ b/tests/topotests/munet/munet-schema.json @@ -117,6 +117,12 @@ "bios": { "type": "string" }, + "cloud-init": { + "type": "boolean" + }, + "cloud-init-disk": { + "type": "string" + }, "disk": { "type": "string" }, @@ -129,7 +135,7 @@ "initial-cmd": { "type": "string" }, - "kerenel": { + "kernel": { "type": "string" }, "initrd": { @@ -373,6 +379,12 @@ "networks-autonumber": { "type": "boolean" }, + "initial-setup-cmd": { + "type": "string" + }, + "initial-setup-host-cmd": { + "type": "string" + }, "networks": { "type": "array", "items": { @@ -452,6 +464,12 @@ "bios": { "type": "string" }, + "cloud-init": { + "type": "boolean" + }, + "cloud-init-disk": { + "type": "string" + }, "disk": { "type": "string" }, @@ -464,7 +482,7 @@ "initial-cmd": { "type": "string" }, - "kerenel": { + "kernel": { "type": "string" }, "initrd": { diff --git a/tests/topotests/munet/mutest/userapi.py b/tests/topotests/munet/mutest/userapi.py index abc63af365..e367e65a15 100644 --- a/tests/topotests/munet/mutest/userapi.py +++ b/tests/topotests/munet/mutest/userapi.py @@ -180,7 +180,7 @@ class TestCase: # sum_hfmt = "{:5.5s} {:4.4s} {:>6.6s} {}" # sum_dfmt = "{:5s} {:4.4s} {:^6.6s} {}" - sum_fmt = "%-8.8s %4.4s %{}s %6s %s" + sum_fmt = "%-10s %4.4s %{}s %6s %s" def __init__( self, diff --git a/tests/topotests/munet/native.py b/tests/topotests/munet/native.py index e3b782396e..4e29fe91b1 100644 --- a/tests/topotests/munet/native.py +++ b/tests/topotests/munet/native.py @@ -24,6 +24,13 @@ import time from pathlib import Path + +try: + # We only want to require yaml for the gen cloud image feature + import yaml +except ImportError: + pass + from . 
import cli from .base import BaseMunet from .base import Bridge @@ -749,9 +756,11 @@ class L3NodeMixin(NodeMixin): # Disable IPv6 self.cmd_raises("sysctl -w net.ipv6.conf.all.autoconf=0") self.cmd_raises("sysctl -w net.ipv6.conf.all.disable_ipv6=1") + self.cmd_raises("sysctl -w net.ipv6.conf.all.forwarding=0") else: self.cmd_raises("sysctl -w net.ipv6.conf.all.autoconf=1") self.cmd_raises("sysctl -w net.ipv6.conf.all.disable_ipv6=0") + self.cmd_raises("sysctl -w net.ipv6.conf.all.forwarding=1") self.next_p2p_network = ipaddress.ip_network(f"10.254.{self.id}.0/31") self.next_p2p_network6 = ipaddress.ip_network(f"fcff:ffff:{self.id:02x}::/127") @@ -2265,6 +2274,164 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace): tid = self.cpu_thread_map[i] self.cmd_raises_nsonly(f"taskset -cp {aff} {tid}") + def _gen_network_config(self): + intfs = sorted(self.intfs) + if not intfs: + return "" + + self.logger.debug("Generating cloud-init interface config") + config = {} + config["version"] = 2 + enets = config["ethernets"] = {} + + for ifname in sorted(self.intfs): + self.logger.debug("Interface %s", ifname) + conn = find_with_kv(self.config["connections"], "name", ifname) + + index = self.config["connections"].index(conn) + to = conn["to"] + switch = self.unet.switches.get(to) + mtu = conn.get("mtu") + if not mtu and switch: + mtu = switch.config.get("mtu") + + devaddr = conn.get("physical", "") + # Eventually we should get the MAC from /sys + if not devaddr: + mac = self.tapmacs.get(ifname, f"02:aa:aa:aa:{index:02x}:{self.id:02x}") + nic = { + "match": {"macaddress": str(mac)}, + "set-name": ifname, + } + if mtu: + nic["mtu"] = str(mtu) + enets[f"nic-{ifname}"] = nic + + ifaddr4 = self.get_intf_addr(ifname, ipv6=False) + ifaddr6 = self.get_intf_addr(ifname, ipv6=True) + if not ifaddr4 and not ifaddr6: + continue + net = { + "dhcp4": False, + "dhcp6": False, + "accept-ra": False, + "addresses": [], + } + if ifaddr4: + net["addresses"].append(str(ifaddr4)) + if ifaddr6: + net["addresses"].append(str(ifaddr6)) + if switch and hasattr(switch, "is_nat") and switch.is_nat: + net["nameservers"] = {"addresses": []} + nameservers = net["nameservers"]["addresses"] + if hasattr(switch, "ip6_address"): + net["gateway6"] = str(switch.ip6_address) + nameservers.append("2001:4860:4860::8888") + if switch.ip_address: + net["gateway4"] = str(switch.ip_address) + nameservers.append("8.8.8.8") + enets[ifname] = net + + return yaml.safe_dump(config) + + def _gen_cloud_init(self): + qc = self.qemu_config + cc = qc.get("console", {}) + cipath = self.rundir.joinpath("cloud-init.img") + + geniso = get_exec_path_host("genisoimage") + if not geniso: + mfbin = get_exec_path_host("mkfs.vfat") + mcbin = get_exec_path_host("mcopy") + assert ( + mfbin and mcbin + ), "genisoimage or mkfs.vfat,mcopy needed to gen cloud-init disk" + + # + # cloud-init: meta-data + # + mdata = f""" +instance-id: "munet-{self.id}" +local-hostname: "{self.name}" +""" + # + # cloud-init: user-data + # + ssh_auth_s = "" + if bool(self.ssh_keyfile): + pubkey = commander.cmd_raises(f"ssh-keygen -y -f {self.ssh_keyfile}") + assert pubkey, f"Can't extract public key from {self.ssh_keyfile}" + pubkey = pubkey.strip() + ssh_auth_s = f'ssh_authorized_keys: ["{pubkey}"]' + + user = cc.get("user", "root") + password = cc.get("password", "admin") + if user != "root": + root_password = "admin" + else: + root_password = password + + udata = f"""#cloud-config +disable_root: 0 +ssh_pwauth: 1 +hostname: {self.name} +runcmd: + - systemctl enable serial-getty@ttyS1.service + 
- systemctl start serial-getty@ttyS1.service + - systemctl enable serial-getty@ttyS2.service + - systemctl start serial-getty@ttyS2.service + - systemctl enable serial-getty@hvc0.service + - systemctl start serial-getty@hvc0.service + - systemctl enable serial-getty@hvc1.service + - systemctl start serial-getty@hvc1.service +users: + - name: root + lock_passwd: false + plain_text_passwd: "{root_password}" + {ssh_auth_s} +""" + if user != "root": + udata += """ + - name: {user} + lock_passwd: false + plain_text_passwd: "{password}" + {ssh_auth_s} +""" + # + # cloud-init: network-config + # + ndata = self._gen_network_config() + + # + # Generate cloud-init files + # + cidir = self.rundir.joinpath("ci-data") + commander.cmd_raises(f"mkdir -p {cidir}") + + with open(cidir.joinpath("meta-data"), "w+", encoding="utf-8") as f: + f.write(mdata) + with open(cidir.joinpath("user-data"), "w+", encoding="utf-8") as f: + f.write(udata) + files = "meta-data user-data" + if ndata: + files += " network-config" + with open(cidir.joinpath("network-config"), "w+", encoding="utf-8") as f: + f.write(ndata) + if geniso: + commander.cmd_raises( + f"cd {cidir} && " + f'genisoimage -output "{cipath}" -volid cidata' + f" -joliet -rock {files}" + ) + else: + commander.cmd_raises(f'cd {cidir} && mkfs.vfat -n cidata "{cipath}"') + commander.cmd_raises(f'cd {cidir} && mcopy -oi "{cipath}" {files}') + + # + # Generate cloud-init disk + # + return cipath + async def launch(self): """Launch qemu.""" self.logger.info("%s: Launch Qemu", self) @@ -2367,11 +2534,21 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace): diskpath = os.path.join(self.unet.config_dirname, diskpath) if dtpl and (not disk or not os.path.exists(diskpath)): + basename = os.path.basename(dtpl) + confdir = self.unet.config_dirname + if re.match("(https|http|ftp|tftp):.*", dtpl): + await self.unet.async_cmd_raises_once( + f"cd {confdir} && (test -e {basename} || curl -fLO {dtpl})" + ) + dtplpath = os.path.join(confdir, basename) + if not disk: - disk = qc["disk"] = f"{self.name}-{os.path.basename(dtpl)}" + disk = qc["disk"] = f"{self.name}-{basename}" diskpath = os.path.join(self.rundir, disk) + if self.path_exists(diskpath): logging.debug("Disk '%s' file exists, using.", diskpath) + else: if dtplpath[0] != "/": dtplpath = os.path.join(self.unet.config_dirname, dtpl) @@ -2392,11 +2569,15 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace): args.extend(["-device", "ahci,id=ahci"]) args.extend(["-device", "ide-hd,bus=ahci.0,drive=sata-disk0"]) - cidiskpath = qc.get("cloud-init-disk") - if cidiskpath: - if cidiskpath[0] != "/": - cidiskpath = os.path.join(self.unet.config_dirname, cidiskpath) - args.extend(["-drive", f"file={cidiskpath},if=virtio,format=qcow2"]) + if qc.get("cloud-init"): + cidiskpath = qc.get("cloud-init-disk") + if cidiskpath: + if cidiskpath[0] != "/": + cidiskpath = os.path.join(self.unet.config_dirname, cidiskpath) + else: + cidiskpath = self._gen_cloud_init() + diskfmt = "qcow2" if str(cidiskpath).endswith("qcow2") else "raw" + args.extend(["-drive", f"file={cidiskpath},if=virtio,format={diskfmt}"]) # args.extend(["-display", "vnc=0.0.0.0:40"]) @@ -2488,7 +2669,7 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace): if use_cmdcon: confiles.append("_cmdcon") - password = cc.get("password", "") + password = cc.get("password", "admin") if self.disk_created: password = cc.get("initial-password", password) @@ -2764,9 +2945,11 @@ ff02::2\tip6-allrouters # Disable IPv6 self.cmd_raises("sysctl -w net.ipv6.conf.all.autoconf=0") self.cmd_raises("sysctl -w 
net.ipv6.conf.all.disable_ipv6=1") + self.cmd_raises("sysctl -w net.ipv6.conf.all.forwarding=0") else: self.cmd_raises("sysctl -w net.ipv6.conf.all.autoconf=1") self.cmd_raises("sysctl -w net.ipv6.conf.all.disable_ipv6=0") + self.cmd_raises("sysctl -w net.ipv6.conf.all.forwarding=1") # we really need overlay, but overlay-layers (used by overlay-images) # counts on things being present in overlay so this temp stuff doesn't work. @@ -2774,6 +2957,24 @@ ff02::2\tip6-allrouters # # Let's hide podman details # self.tmpfs_mount("/var/lib/containers/storage/overlay-containers") + def run_init_cmds(unet, key, on_host): + cmds = unet.topoconf.get(key, "") + cmds = cmds.replace("%CONFIGDIR%", str(unet.config_dirname)) + cmds = cmds.replace("%RUNDIR%", str(unet.rundir)) + cmds = cmds.strip() + if not cmds: + return + + cmds += "\n" + c = commander if on_host else unet + o = c.cmd_raises(cmds) + self.logger.debug( + "run_init_cmds (on-host: %s): %s", on_host, cmd_error(0, o, "") + ) + + run_init_cmds(self, "initial-setup-host-cmd", True) + run_init_cmds(self, "initial-setup-cmd", False) + shellopt = self.cfgopt.getoption("--shell") shellopt = shellopt if shellopt else "" if shellopt == "all" or "." in shellopt.split(","): @@ -3061,7 +3262,8 @@ done""" if not rc: continue logging.info("Pulling missing image %s", image) - aw = self.rootcmd.async_cmd_raises(f"podman pull {image}") + + aw = self.rootcmd.async_cmd_raises_once(f"podman pull {image}") tasks.append(asyncio.create_task(aw)) if not tasks: return diff --git a/tests/topotests/munet/testing/util.py b/tests/topotests/munet/testing/util.py index 99687c0a83..02ff9bd69e 100644 --- a/tests/topotests/munet/testing/util.py +++ b/tests/topotests/munet/testing/util.py @@ -8,12 +8,17 @@ """Utility functions useful when using munet testing functionailty in pytest.""" import asyncio import datetime +import fcntl import functools import logging +import os +import re +import select import sys import time from ..base import BaseMunet +from ..base import Timeout from ..cli import async_cli @@ -23,6 +28,7 @@ from ..cli import async_cli async def async_pause_test(desc=""): + """Pause the running of a test offering options for CLI or PDB.""" isatty = sys.stdout.isatty() if not isatty: desc = f" for {desc}" if desc else "" @@ -49,11 +55,12 @@ async def async_pause_test(desc=""): def pause_test(desc=""): + """Pause the running of a test offering options for CLI or PDB.""" asyncio.run(async_pause_test(desc)) def retry(retry_timeout, initial_wait=0, retry_sleep=2, expected=True): - """decorator: retry while functions return is not None or raises an exception. + """Retry decorated function until it returns None, raises an exception, or timeout. * `retry_timeout`: Retry for at least this many seconds; after waiting initial_wait seconds @@ -116,3 +123,91 @@ def retry(retry_timeout, initial_wait=0, retry_sleep=2, expected=True): return func_retry return _retry + + +def readline(f, timeout=None): + """Read a line or timeout. + + This function will take over the file object, the file object should not be used + outside of calling this function once you begin. + + Return: A line, remaining buffer if EOF (subsequent calls will return ""), or None + for timeout. 
+ """ + fd = f.fileno() + if not hasattr(f, "munet_non_block_set"): + flags = fcntl.fcntl(fd, fcntl.F_GETFL) + fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) + f.munet_non_block_set = True + f.munet_lines = [] + f.munet_buf = "" + + if f.munet_lines: + return f.munet_lines.pop(0) + + timeout = Timeout(timeout) + remaining = timeout.remaining() + while remaining > 0: + ready, _, _ = select.select([fd], [], [], remaining) + if not ready: + return None + + c = f.read() + if c is None: + logging.error("munet readline: unexpected None during read") + return None + + if not c: + logging.debug("munet readline: got eof") + c = f.munet_buf + f.munet_buf = "" + return c + + f.munet_buf += c + while "\n" in f.munet_buf: + a, f.munet_buf = f.munet_buf.split("\n", 1) + f.munet_lines.append(a + "\n") + + if f.munet_lines: + return f.munet_lines.pop(0) + + remaining = timeout.remaining() + return None + + +def waitline(f, regex, timeout=120): + """Match a regex within lines from a file with a timeout. + + This function will take over the file object (by calling `readline` above), the file + object should not be used outside of calling these functions once you begin. + + Return: the match object or None. + """ + timeo = Timeout(timeout) + while not timeo.is_expired(): + line = readline(f, timeo.remaining()) + if line is None: + break + + if line == "": + logging.warning("waitline: got eof while matching '%s'", regex) + return None + + assert line[-1] == "\n" + line = line[:-1] + if not line: + continue + + logging.debug("waitline: searching: '%s' for '%s'", line, regex) + m = re.search(regex, line) + if m: + logging.debug("waitline: matched '%s'", m.group(0)) + return m + + logging.warning( + "Timeout while getting output matching '%s' within %ss (actual %ss)", + regex, + timeout, + timeo.elapsed(), + ) + return None diff --git a/tests/topotests/ospf6_ecmp_inter_area/r1/show_ipv6_routes_ospf6-1.json b/tests/topotests/ospf6_ecmp_inter_area/r1/show_ipv6_routes_ospf6-1.json new file mode 100644 index 0000000000..ff2cf31193 --- /dev/null +++ b/tests/topotests/ospf6_ecmp_inter_area/r1/show_ipv6_routes_ospf6-1.json @@ -0,0 +1,155 @@ +{ + "2001:db8:2::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth0", + "active": true + } + ] + } + ], + "2001:db8:3::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth1", + "active": true + } + ] + } + ], + "2001:db8:4::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + } + ] + } + ], + "2001:db8:5::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth0", + "active": true + } + ] + } + ], + "2001:db8:6::/64": [ + { + "internalNextHopActiveNum": 2, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + }, + { + "fib": true, + "interfaceName": "r1-eth1", + "active": true + } + ] + } + ], + "2001:db8:7::/64": [ + { + "internalNextHopActiveNum": 3, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + }, + { + "fib": true, + "interfaceName": "r1-eth0", + "active": true + }, + { + "fib": true, + "interfaceName": "r1-eth1", + "active": true + } + ] + } + ], + "2001:db8:8::/64": [ + { + "internalNextHopActiveNum": 3, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + }, + { + "fib": true, + "interfaceName": "r1-eth0", + "active": true + }, + { + 
"fib": true, + "interfaceName": "r1-eth1", + "active": true + } + ] + } + ], + "2001:db8:8007::/64": [ + { + "internalNextHopActiveNum": 3, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + }, + { + "fib": true, + "interfaceName": "r1-eth0", + "active": true + }, + { + "fib": true, + "interfaceName": "r1-eth1", + "active": true + } + ] + } + ], + "2001:db8:8008::/64": [ + { + "internalNextHopActiveNum": 3, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + }, + { + "fib": true, + "interfaceName": "r1-eth0", + "active": true + }, + { + "fib": true, + "interfaceName": "r1-eth1", + "active": true + } + ] + } + ] +} diff --git a/tests/topotests/ospf6_ecmp_inter_area/r1/show_ipv6_routes_ospf6-2.json b/tests/topotests/ospf6_ecmp_inter_area/r1/show_ipv6_routes_ospf6-2.json new file mode 100644 index 0000000000..8918feb969 --- /dev/null +++ b/tests/topotests/ospf6_ecmp_inter_area/r1/show_ipv6_routes_ospf6-2.json @@ -0,0 +1,130 @@ +{ + "2001:db8:2::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth0", + "active": true + } + ] + } + ], + "2001:db8:3::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth1", + "active": true + } + ] + } + ], + "2001:db8:4::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + } + ] + } + ], + "2001:db8:5::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth0", + "active": true + } + ] + } + ], + "2001:db8:6::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + } + ] + } + ], + "2001:db8:7::/64": [ + { + "internalNextHopActiveNum": 2, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + }, + { + "fib": true, + "interfaceName": "r1-eth0", + "active": true + } + ] + } + ], + "2001:db8:8::/64": [ + { + "internalNextHopActiveNum": 2, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + }, + { + "fib": true, + "interfaceName": "r1-eth0", + "active": true + } + ] + } + ], + "2001:db8:8007::/64": [ + { + "internalNextHopActiveNum": 2, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + }, + { + "fib": true, + "interfaceName": "r1-eth0", + "active": true + } + ] + } + ], + "2001:db8:8008::/64": [ + { + "internalNextHopActiveNum": 2, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + }, + { + "fib": true, + "interfaceName": "r1-eth0", + "active": true + } + ] + } + ] +} diff --git a/tests/topotests/ospf6_ecmp_inter_area/r1/show_ipv6_routes_ospf6-3.json b/tests/topotests/ospf6_ecmp_inter_area/r1/show_ipv6_routes_ospf6-3.json new file mode 100644 index 0000000000..99ceb036d5 --- /dev/null +++ b/tests/topotests/ospf6_ecmp_inter_area/r1/show_ipv6_routes_ospf6-3.json @@ -0,0 +1,110 @@ +{ + "2001:db8:2::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth0", + "active": true + } + ] + } + ], + "2001:db8:3::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth1", + "active": true + } + ] + } + ], + "2001:db8:4::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + } + ] + } + ], + "2001:db8:5::/64": [ + { + 
"internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + } + ] + } + ], + "2001:db8:6::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + } + ] + } + ], + "2001:db8:7::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + } + ] + } + ], + "2001:db8:8::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + } + ] + } + ], + "2001:db8:8007::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + } + ] + } + ], + "2001:db8:8008::/64": [ + { + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "interfaceName": "r1-eth2", + "active": true + } + ] + } + ] +} diff --git a/tests/topotests/ospf6_ecmp_inter_area/r7/ospf6d.conf b/tests/topotests/ospf6_ecmp_inter_area/r7/ospf6d.conf index 9b7756e838..451cf2f728 100644 --- a/tests/topotests/ospf6_ecmp_inter_area/r7/ospf6d.conf +++ b/tests/topotests/ospf6_ecmp_inter_area/r7/ospf6d.conf @@ -13,9 +13,6 @@ interface r7-eth2 ipv6 ospf6 hello-interval 2 ipv6 ospf6 dead-interval 10 ! -interface r7-eth3 - shutdown -! router ospf6 ospf6 router-id 10.254.254.7 redistribute connected diff --git a/tests/topotests/ospf6_ecmp_inter_area/r8/ospf6d.conf b/tests/topotests/ospf6_ecmp_inter_area/r8/ospf6d.conf index 33c64979ca..f8d8619bc2 100644 --- a/tests/topotests/ospf6_ecmp_inter_area/r8/ospf6d.conf +++ b/tests/topotests/ospf6_ecmp_inter_area/r8/ospf6d.conf @@ -13,9 +13,6 @@ interface r8-eth2 ipv6 ospf6 hello-interval 2 ipv6 ospf6 dead-interval 10 ! -interface r8-eth3 - shutdown -! router ospf6 ospf6 router-id 10.254.254.8 redistribute connected diff --git a/tests/topotests/ospf6_ecmp_inter_area/test_ospf6_ecmp_inter_area.py b/tests/topotests/ospf6_ecmp_inter_area/test_ospf6_ecmp_inter_area.py index adf289e2de..6f1dd6a54a 100644 --- a/tests/topotests/ospf6_ecmp_inter_area/test_ospf6_ecmp_inter_area.py +++ b/tests/topotests/ospf6_ecmp_inter_area/test_ospf6_ecmp_inter_area.py @@ -43,12 +43,13 @@ route each. With all links up, we expect 3 ECMP paths and 3 nexthops on R1 towards each of R7/8. Then we bring down the R3-R6 link, causing only 2 remaining paths and 2 nexthops on R1. Then we bring down the R2-R5 link, causing only -1 remaining path and 1 nexthop on R1. +1 remaining path and 1 nexthop on R1. -The test is successful if the number of nexthops for the routes on R1 is as -expected. +The test is successful if the number of nexthops and their interfaces for +the routes on R1 is as expected. """ +import json import os import sys from functools import partial @@ -62,7 +63,7 @@ sys.path.append(os.path.join(CWD, "../")) # Import topogen and topotest helpers from lib import topotest from lib.topogen import Topogen, TopoRouter, get_topogen -from lib.topolog import logger +from lib.common_config import write_test_header, write_test_footer, step # Required to instantiate the topology builder class. 
@@ -111,17 +112,42 @@ def setup_module(mod): tgen.start_router() -def test_wait_protocol_convergence(): +def expect_routes_json(router, exp_routes_json_fname, stepmsg): + "Wait until OSPFv3 routes match JSON spec" + step( + "waiting for OSPFv3 router '{}' routes/nexthops to match {} ({})".format( + router, exp_routes_json_fname, stepmsg + ) + ) + + json_file = "{}/{}/{}".format(CWD, router, exp_routes_json_fname) + expected = json.loads(open(json_file).read()) + tgen = get_topogen() + test_func = partial( + topotest.router_json_cmp, + tgen.gears[router], + "show ipv6 route ospf6 json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assertmsg = '"{}" JSON output mismatches ({})'.format(router, stepmsg) + assert result is None, assertmsg + + +def test_wait_protocol_convergence(request): "Wait for OSPFv3 to converge" + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info("waiting for protocols to converge") + step("waiting for protocols to converge") def expect_neighbor_full(router, neighbor): "Wait until OSPFv3 neighborship is full" - logger.info( + step( "waiting for OSPFv3 router '{}' neighborship with '{}'".format( router, neighbor ) @@ -156,57 +182,31 @@ def test_wait_protocol_convergence(): expect_neighbor_full("r8", "10.254.254.5") expect_neighbor_full("r8", "10.254.254.6") + expect_routes_json("r1", "show_ipv6_routes_ospf6-1.json", "post-convergence") -def test_ecmp_inter_area(): + write_test_footer(tc_name) + + +def test_ecmp_inter_area(request): "Test whether OSPFv3 ECMP nexthops are properly updated for inter-area routes after link down" + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - def num_nexthops(router): - # Careful: "show ipv6 ospf6 route json" doesn't work here. It will - # only list one route type per prefix and that might not necessarily - # be the best/selected route. "show ipv6 route ospf6 json" only - # lists selected routes, so that's more useful in this case. - routes = tgen.gears[router].vtysh_cmd("show ipv6 route ospf6 json", isjson=True) - route_prefixes_infos = sorted(routes.items()) - # Note: ri may contain one entry per routing protocol, but since - # we've explicitly requested only ospf6 above, we can count on ri[0] - # being the entry we're looking for. 
- return [ri[0]["internalNextHopActiveNum"] for rp, ri in route_prefixes_infos] - - def expect_num_nexthops(router, expected_num_nexthops, count): - "Wait until number of nexthops for routes matches expectation" - logger.info( - "waiting for OSPFv3 router '{}' nexthops {}".format( - router, expected_num_nexthops - ) - ) - test_func = partial(num_nexthops, router) - _, result = topotest.run_and_expect( - test_func, expected_num_nexthops, count=count, wait=3 - ) - assert ( - result == expected_num_nexthops - ), "'{}' wrong number of route nexthops".format(router) - - # Check nexthops pre link-down - # tgen.mininet_cli() - expect_num_nexthops("r1", [1, 1, 1, 1, 2, 3, 3, 3, 3], 4) - - logger.info("triggering R3-R6 link down") + step("triggering R3-R6 link down") tgen.gears["r3"].run("ip link set r3-eth1 down") - # tgen.mininet_cli() - # Check nexthops post link-down - expect_num_nexthops("r1", [1, 1, 1, 1, 1, 2, 2, 2, 2], 8) + expect_routes_json("r1", "show_ipv6_routes_ospf6-2.json", "post-R3-R6-link-down") - logger.info("triggering R2-R5 link down") + step("triggering R2-R5 link down") tgen.gears["r2"].run("ip link set r2-eth1 down") - # tgen.mininet_cli() - # Check nexthops post link-down - expect_num_nexthops("r1", [1, 1, 1, 1, 1, 1, 1, 1, 1], 8) + expect_routes_json("r1", "show_ipv6_routes_ospf6-3.json", "post-R2-R5-link-down") + + write_test_footer(tc_name) def teardown_module(_mod): @@ -215,14 +215,19 @@ def teardown_module(_mod): tgen.stop_topology() -def test_memory_leak(): +def test_memory_leak(request): "Run the memory leak test and report results." + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() if not tgen.is_memleak_enabled(): pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() + write_test_footer(tc_name) + if __name__ == "__main__": args = ["-s"] + sys.argv[1:] diff --git a/tests/topotests/ospf_metric_propagation/r1/frr.conf b/tests/topotests/ospf_metric_propagation/r1/frr.conf index 082f7df519..09ae6e8d18 100644 --- a/tests/topotests/ospf_metric_propagation/r1/frr.conf +++ b/tests/topotests/ospf_metric_propagation/r1/frr.conf @@ -1,6 +1,10 @@ ! hostname r1 ! +vrf green + ip route 10.48.48.0/24 10.0.91.2 +exit +! interface r1-eth0 ip address 10.0.1.1/24 ip ospf cost 100 @@ -61,6 +65,7 @@ router bgp 99 vrf green address-family ipv4 unicast redistribute connected redistribute ospf + redistribute static import vrf route-map rmap import vrf default import vrf blue @@ -75,7 +80,7 @@ ip prefix-list min seq 5 permit 10.0.80.0/24 route-map costmax permit 20 set metric-type type-1 set metric +1 - set metric-min 713 + set min-metric 713 match ip address prefix-list min exit ! @@ -83,7 +88,7 @@ ip prefix-list max seq 10 permit 10.0.70.0/24 route-map costplus permit 30 set metric-type type-1 set metric +1 - set metric-max 13 + set max-metric 13 match ip address prefix-list max exit ! 
diff --git a/tests/topotests/ospf_metric_propagation/r1/show_ip_route_static.json b/tests/topotests/ospf_metric_propagation/r1/show_ip_route_static.json index 628a556c62..6060e8bd6b 100644 --- a/tests/topotests/ospf_metric_propagation/r1/show_ip_route_static.json +++ b/tests/topotests/ospf_metric_propagation/r1/show_ip_route_static.json @@ -3,44 +3,23 @@ { "prefix":"10.48.48.0/24", "prefixLen":24, - "protocol":"ospf", - "vrfId":0, - "vrfName":"default", - "distance":20, - "metric":134, - "table":254, - "nexthops":[ - { - "flags":3, - "fib":true, - "ip":"10.0.1.2", - "afi":"ipv4", - "interfaceName":"r1-eth0", - "active":true, - "weight":1 - } - ] - }, - { - "prefix":"10.48.48.0/24", - "prefixLen":24, "protocol":"bgp", "vrfId":0, "vrfName":"default", "selected":true, "destSelected":true, - "distance":20, - "metric":34, + "distance":1, + "metric":1, "installed":true, "table":254, "nexthops":[ { "flags":3, "fib":true, - "ip":"10.0.10.5", + "ip":"10.0.91.2", "afi":"ipv4", - "interfaceName":"r1-eth1", - "vrf":"blue", + "interfaceName":"r1-eth2", + "vrf":"green", "active":true, "weight":1 } diff --git a/tests/topotests/ospf_metric_propagation/r4/frr.conf b/tests/topotests/ospf_metric_propagation/r4/frr.conf index d9832d80b8..b02ae18fc1 100644 --- a/tests/topotests/ospf_metric_propagation/r4/frr.conf +++ b/tests/topotests/ospf_metric_propagation/r4/frr.conf @@ -1,10 +1,6 @@ ! hostname r4 ! -vrf green - ip route 10.48.48.0/24 10.0.94.2 -exit - interface r4-eth0 ip address 10.0.3.4/24 ip ospf cost 100 @@ -63,7 +59,6 @@ router bgp 99 vrf green address-family ipv4 unicast redistribute connected redistribute ospf - redistribute static import vrf route-map rmap import vrf default import vrf blue diff --git a/tests/topotests/ospf_metric_propagation/test_ospf_metric_propagation.py b/tests/topotests/ospf_metric_propagation/test_ospf_metric_propagation.py index 4639a1e26b..f574dac4e2 100644 --- a/tests/topotests/ospf_metric_propagation/test_ospf_metric_propagation.py +++ b/tests/topotests/ospf_metric_propagation/test_ospf_metric_propagation.py @@ -190,8 +190,8 @@ def test_all_links_up(): assert result is None, assertmsg -def test_static_remote(): - "Test static route at R1 configured on R4" +def test_static(): + "Test static route at R1 leaked from VRF green" tgen = get_topogen() if tgen.routers_have_failure(): @@ -201,7 +201,7 @@ def test_static_remote(): json_file = "{}/r1/show_ip_route_static.json".format(CWD) expected = json.loads(open(json_file).read()) test_func = partial( - topotest.router_json_cmp, r1, "show ip route 10.48.48.2 json", expected + topotest.router_json_cmp, r1, "show ip route 10.48.48.0/24 json", expected ) _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) diff --git a/tests/topotests/ospf_netns_vrf/test_ospf_netns_vrf.py b/tests/topotests/ospf_netns_vrf/test_ospf_netns_vrf.py index 718445f01f..c0a4689d46 100644 --- a/tests/topotests/ospf_netns_vrf/test_ospf_netns_vrf.py +++ b/tests/topotests/ospf_netns_vrf/test_ospf_netns_vrf.py @@ -87,11 +87,9 @@ def setup_module(mod): router.net.set_intf_netns(rname + "-eth0", ns, up=True) router.net.set_intf_netns(rname + "-eth1", ns, up=True) - router.load_config(TopoRouter.RD_MGMTD, None, "--vrfwnetns") + router.use_netns_vrf() router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, "{}/zebra.conf".format(rname)), - "--vrfwnetns", + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) diff --git 
a/tests/topotests/ospf_prune_next_hop/r1/frr.conf b/tests/topotests/ospf_prune_next_hop/r1/frr.conf new file mode 100644 index 0000000000..130872e8d0 --- /dev/null +++ b/tests/topotests/ospf_prune_next_hop/r1/frr.conf @@ -0,0 +1,23 @@ +! +hostname r1 +ip forwarding +! +interface r1-eth0 + ip address 10.1.1.1/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! +interface r1-eth1 + ip address 10.1.2.1/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! +! +! +router ospf + ospf router-id 1.1.1.1 + distance 20 + network 10.1.1.0/24 area 0 + network 10.1.2.0/24 area 0 diff --git a/tests/topotests/ospf_prune_next_hop/r2/frr.conf b/tests/topotests/ospf_prune_next_hop/r2/frr.conf new file mode 100644 index 0000000000..4268aea857 --- /dev/null +++ b/tests/topotests/ospf_prune_next_hop/r2/frr.conf @@ -0,0 +1,23 @@ +! +hostname r2 +ip forwarding +! +interface r2-eth0 + ip address 10.1.1.2/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! +interface r2-eth1 + ip address 10.1.2.1/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! +! +! +router ospf + ospf router-id 2.2.2.2 + distance 20 + network 10.1.1.0/24 area 0 + network 10.1.2.0/24 area 0 diff --git a/tests/topotests/ospf_prune_next_hop/r3/frr.conf b/tests/topotests/ospf_prune_next_hop/r3/frr.conf new file mode 100644 index 0000000000..21d6506d7c --- /dev/null +++ b/tests/topotests/ospf_prune_next_hop/r3/frr.conf @@ -0,0 +1,35 @@ +! +hostname r3 +ip forwarding +! +interface r3-eth0 + ip address 20.1.3.3/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! +interface r3-eth1 + ip address 10.1.3.3/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! +interface r3-eth2 + ip address 10.1.2.3/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! +! +! +router ospf + ospf router-id 3.3.3.3 + distance 20 + network 10.1.2.0/24 area 0 + network 10.1.3.0/24 area 0 + network 20.1.3.0/24 area 1 + area 1 range 20.1.0.0/16 + redistribute static +! +! +ip route 100.100.100.100/32 Null0 diff --git a/tests/topotests/ospf_prune_next_hop/r4/frr.conf b/tests/topotests/ospf_prune_next_hop/r4/frr.conf new file mode 100644 index 0000000000..e66e93e20c --- /dev/null +++ b/tests/topotests/ospf_prune_next_hop/r4/frr.conf @@ -0,0 +1,34 @@ +! +hostname r4 +ip forwarding +! +interface r4-eth0 + ip address 20.1.4.4/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! +interface r4-eth1 + ip address 10.1.3.4/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! +interface r4-eth2 + ip address 10.1.2.4/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! +! +router ospf + ospf router-id 4.4.4.4 + distance 20 + network 10.1.2.0/24 area 0 + network 10.1.3.0/24 area 0 + network 20.1.4.0/24 area 1 + area 1 range 20.1.0.0/16 + redistribute static +! +! +ip route 100.100.100.100/32 Null0 diff --git a/tests/topotests/ospf_prune_next_hop/r5/frr.conf b/tests/topotests/ospf_prune_next_hop/r5/frr.conf new file mode 100644 index 0000000000..2d1dad9925 --- /dev/null +++ b/tests/topotests/ospf_prune_next_hop/r5/frr.conf @@ -0,0 +1,34 @@ +! +hostname r5 +ip forwarding +! +interface r5-eth0 + ip address 20.1.5.5/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! 
+interface r5-eth1 + ip address 10.1.3.5/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! +interface r5-eth2 + ip address 10.1.2.5/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! +! +router ospf + ospf router-id 5.5.5.5 + distance 20 + network 10.1.2.0/24 area 0 + network 10.1.3.0/24 area 0 + network 20.1.5.0/24 area 1 + area 1 range 20.1.0.0/16 + redistribute static +! +! +ip route 100.100.100.100/32 Null0 diff --git a/tests/topotests/ospf_prune_next_hop/r6/frr.conf b/tests/topotests/ospf_prune_next_hop/r6/frr.conf new file mode 100644 index 0000000000..f343ee7c35 --- /dev/null +++ b/tests/topotests/ospf_prune_next_hop/r6/frr.conf @@ -0,0 +1,34 @@ +! +hostname r6 +ip forwarding +! +interface r6-eth0 + ip address 20.1.6.6/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! +interface r6-eth1 + ip address 10.1.3.6/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! +interface r6-eth2 + ip address 10.1.2.6/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! +! +router ospf + ospf router-id 6.6.6.6 + distance 20 + network 10.1.2.0/24 area 0 + network 10.1.3.0/24 area 0 + network 20.1.6.0/24 area 1 + area 1 range 20.1.0.0/16 + redistribute static +! +! +ip route 100.100.100.100/32 Null0 diff --git a/tests/topotests/ospf_prune_next_hop/r7/frr.conf b/tests/topotests/ospf_prune_next_hop/r7/frr.conf new file mode 100644 index 0000000000..1eeb88c9d0 --- /dev/null +++ b/tests/topotests/ospf_prune_next_hop/r7/frr.conf @@ -0,0 +1,14 @@ +! +hostname r7 +ip forwarding +! +interface r7-eth0 + ip address 10.1.3.7/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! +router ospf + ospf router-id 7.7.7.7 + distance 20 + network 10.1.3.0/24 area 0 diff --git a/tests/topotests/ospf_prune_next_hop/r8/frr.conf b/tests/topotests/ospf_prune_next_hop/r8/frr.conf new file mode 100644 index 0000000000..d8facbc01f --- /dev/null +++ b/tests/topotests/ospf_prune_next_hop/r8/frr.conf @@ -0,0 +1,14 @@ +! +hostname r8 +ip forwarding +! +interface r8-eth0 + ip address 10.1.3.8/24 + ip ospf network broadcast + ip ospf hello-interval 1 + ip ospf dead-interval 10 +! 
+router ospf + ospf router-id 8.8.8.8 + distance 20 + network 10.1.3.0/24 area 0 diff --git a/tests/topotests/ospf_prune_next_hop/test_ospf_prune_next_hop.py b/tests/topotests/ospf_prune_next_hop/test_ospf_prune_next_hop.py new file mode 100644 index 0000000000..88aa6b2e36 --- /dev/null +++ b/tests/topotests/ospf_prune_next_hop/test_ospf_prune_next_hop.py @@ -0,0 +1,343 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# test_ospf_prune_next_hop +# +# Copyright (c) 2025 LabN Consulting +# Acee Lindem +# + +import os +import sys +from functools import partial +import pytest + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, get_topogen +from lib.topolog import logger + +from lib.common_config import ( + step, +) + + +""" +test_ospf_metric_propagation.py: Test OSPF/BGP metric propagation +""" + +TOPOLOGY = """ + 20.1.3.0 20.1.4.0 20.1.5.0 20.1.6.0 + eth0 | .3 eth0 | .4 eth0 | .5 eth0 | .6 + +--+-+ +--+-+ +--+-+ +--+-+ +10.1 3.0 | R3 | | R4 | | R5 | | R6 | + +-----+ | | |eth1 | |eth1 | | 10.1.3.0/24 + | | | | +---- | |--- + -+---+ + | +--+-+ +--+-+ +--+-+ +--+-+ | + | eth2 | .3 eth2 | .4 eth2 | .5 eth2 | | +eth0| | | | | | eth0 + +--+--+ ++-------+ Switch Network +---------++ +--+---+ + | R7 | | 10.1.2.0/24 | | R8 | + +-----+ +------------------------------------+ +------+ + eth1 | .2 + +--+--+ + | R2 | + +--+--+ + eth0 | .2 + 10.1.1.0/24 | + eth0 | .1 + +--+--+ + | R1 | + +-----+ + +""" + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# Required to instantiate the topology builder class. + +pytestmark = [pytest.mark.ospfd, pytest.mark.bgpd] + + +def build_topo(tgen): + "Build function" + + # Create 8 routers + tgen.add_router("r1") + tgen.add_router("r2") + tgen.add_router("r3") + tgen.add_router("r4") + tgen.add_router("r5") + tgen.add_router("r6") + tgen.add_router("r7") + tgen.add_router("r8") + + # Interconect router 1, 2 (0) + switch = tgen.add_switch("s1-1-2") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + # Add standalone networks to router 3 + switch = tgen.add_switch("s2-3") + switch.add_link(tgen.gears["r3"]) + + # Add standalone network to router 4 + switch = tgen.add_switch("s3-4") + switch.add_link(tgen.gears["r4"]) + + # Add standalone network to router 5 + switch = tgen.add_switch("s4-5") + switch.add_link(tgen.gears["r5"]) + + # Add standalone network to router 6 + switch = tgen.add_switch("s5-6") + switch.add_link(tgen.gears["r6"]) + + # Interconect routers 3, 4, 5, and 6 + switch = tgen.add_switch("s6-3") + switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["r7"]) + switch = tgen.add_switch("s7-4") + switch.add_link(tgen.gears["r4"]) + switch = tgen.add_switch("s8-5") + switch.add_link(tgen.gears["r5"]) + switch = tgen.add_switch("s9-6") + switch.add_link(tgen.gears["r6"]) + switch.add_link(tgen.gears["r8"]) + + # Interconect routers 2, 3, 4, 5, and 6 + switch = tgen.add_switch("s10-lan") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["r4"]) + switch.add_link(tgen.gears["r5"]) + switch.add_link(tgen.gears["r6"]) + + +def setup_module(mod): + logger.info("OSPF Prune Next Hops:\n {}".format(TOPOLOGY)) + + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + # Starting Routers + router_list = tgen.routers() + + for rname, router in router_list.items(): + 
logger.info("Loading router %s" % rname) + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) + + # Initialize all routers. + tgen.start_router() + + +def teardown_module(): + "Teardown the pytest environment" + tgen = get_topogen() + tgen.stop_topology() + + +def test_intra_area_route_prune(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip("Skipped because of router(s) failure") + + step("Test OSPF intra-area route 10.1.3.0/24 duplicate nexthops already pruned") + # Verify OSPF route 10.1.3.0/24 nexthops pruned already. + r1 = tgen.gears["r1"] + input_dict = { + "10.1.3.0/24": { + "routeType": "N", + "transit": True, + "cost": 30, + "area": "0.0.0.0", + "nexthops": [ + {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "8.8.8.8"} + ], + } + } + test_func = partial( + topotest.router_json_cmp, r1, "show ip ospf route detail json", input_dict + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + assertmsg = "OSPF Intra-Area route 10.1.3.0/24 mismatch on router r1" + assert result is None, assertmsg + + step("Test IP route 10.1.3.0/24 installed") + input_dict = { + "10.1.3.0/24": [ + { + "prefix": "10.1.3.0/24", + "prefixLen": 24, + "protocol": "ospf", + "vrfName": "default", + "distance": 20, + "metric": 30, + "installed": True, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "ip": "10.1.1.2", + "afi": "ipv4", + "interfaceName": "r1-eth0", + "active": True, + "weight": 1, + } + ], + } + ] + } + test_func = partial( + topotest.router_json_cmp, r1, "show ip route 10.1.3.0/24 json", input_dict + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + assertmsg = "IP route 10.1.3.0/24 mismatch on router r1" + assert result is None, assertmsg + + +def test_inter_area_route_prune(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip("Skipped because of router(s) failure") + + step("Test OSPF inter-area route 20.1.0.0/16 duplicate nexthops installed") + # Verify OSPF route 20.1.0.0/16 duplication nexthops + r1 = tgen.gears["r1"] + input_dict = { + "20.1.0.0/16": { + "routeType": "N IA", + "cost": 30, + "area": "0.0.0.0", + "nexthops": [ + {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "3.3.3.3"}, + {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "4.4.4.4"}, + {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "5.5.5.5"}, + {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "6.6.6.6"}, + ], + } + } + test_func = partial( + topotest.router_json_cmp, r1, "show ip ospf route detail json", input_dict + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + assertmsg = "OSPF Inter-Area route 20.1.0.0/16 mismatch on router r1" + assert result is None, assertmsg + + step("Test IP route 10.1.3.0/24 installed with pruned next-hops") + input_dict = { + "20.1.0.0/16": [ + { + "prefix": "20.1.0.0/16", + "prefixLen": 16, + "protocol": "ospf", + "vrfName": "default", + "distance": 20, + "metric": 30, + "installed": True, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "ip": "10.1.1.2", + "afi": "ipv4", + "interfaceName": "r1-eth0", + "active": True, + "weight": 1, + } + ], + } + ] + } + test_func = partial( + topotest.router_json_cmp, r1, "show ip route 20.1.0.0/16 json", input_dict + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + assertmsg = "IP route 20.1.1.0/24 mismatch on router r1" + assert result is None, assertmsg + + +def 
test_as_external_route_prune(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip("Skipped because of router(s) failure") + + step("Test OSPF AS external route 100.100.100.100 duplicate nexthops installed") + # Verify OSPF route 20.1.0.0/16 duplication nexthops + r1 = tgen.gears["r1"] + input_dict = { + "100.100.100.100/32": { + "routeType": "N E2", + "cost": 20, + "type2cost": 20, + "tag": 0, + "nexthops": [ + {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "3.3.3.3"}, + {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "4.4.4.4"}, + {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "5.5.5.5"}, + {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "6.6.6.6"}, + ], + } + } + test_func = partial( + topotest.router_json_cmp, r1, "show ip ospf route detail json", input_dict + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + assertmsg = "OSPF AS external route 100.100.100.100/32 mismatch on router r1" + assert result is None, assertmsg + + step("Test IP route 100.100.100.100/32 installed with pruned next-hops") + input_dict = { + "100.100.100.100/32": [ + { + "prefix": "100.100.100.100/32", + "prefixLen": 32, + "protocol": "ospf", + "vrfName": "default", + "distance": 20, + "metric": 20, + "installed": True, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "ip": "10.1.1.2", + "afi": "ipv4", + "interfaceName": "r1-eth0", + "active": True, + "weight": 1, + } + ], + } + ] + } + test_func = partial( + topotest.router_json_cmp, + r1, + "show ip route 100.100.100.100/32 json", + input_dict, + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + assertmsg = "IP route 100.100.100.100/32 mismatch on router r1" + assert result is None, assertmsg + + +def test_memory_leak(): + "Run the memory leak test and report results." 
+ tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/pim_boundary_acl/test_pim_boundary_acl.py b/tests/topotests/pim_boundary_acl/test_pim_boundary_acl.py index 1488e610c8..2a77c3b223 100644 --- a/tests/topotests/pim_boundary_acl/test_pim_boundary_acl.py +++ b/tests/topotests/pim_boundary_acl/test_pim_boundary_acl.py @@ -135,12 +135,10 @@ def test_pim_asm_igmp_join_acl(): expected = { "r1-eth0":{ "name":"r1-eth0", - "224.0.1.40":"*", "229.1.1.1":None }, "r1-eth2":{ "name":"r1-eth2", - "224.0.1.40":"*", "229.1.1.1":None } } @@ -166,9 +164,7 @@ def test_pim_asm_igmp_join_acl(): "sources":[ { "source":"*", - "timer":"--:--", "forwarded":False, - "uptime":"*" } ] } @@ -227,8 +223,6 @@ def test_pim_asm_igmp_join_acl(): "source":"*", "group":"229.1.1.1", "primaryAddr":"10.0.20.2", - "sockFd":"*", - "upTime":"*" } ] } @@ -286,13 +280,11 @@ def test_pim_ssm_igmp_join_acl(): expected = { "r1-eth0":{ "name":"r1-eth0", - "224.0.1.40":"*", "229.1.1.1":None, "232.1.1.1":None }, "r1-eth2":{ "name":"r1-eth2", - "224.0.1.40":"*", "229.1.1.1":None, "232.1.1.1":None } @@ -319,9 +311,7 @@ def test_pim_ssm_igmp_join_acl(): "sources":[ { "source":"10.0.20.2", - "timer":"*", "forwarded":False, - "uptime":"*" } ] } @@ -397,9 +387,7 @@ def test_pim_ssm_igmp_join_acl(): "sources":[ { "source":"10.0.20.2", - "timer":"*", "forwarded":False, - "uptime":"*" } ] } @@ -422,8 +410,6 @@ def test_pim_ssm_igmp_join_acl(): "source":"10.0.20.2", "group":"232.1.1.1", "primaryAddr":"10.0.20.2", - "sockFd":"*", - "upTime":"*" } ] } @@ -491,9 +477,7 @@ def test_pim_ssm_igmp_join_acl(): "sources":[ { "source":"10.0.40.4", - "timer":"*", "forwarded":False, - "uptime":"*" } ] } diff --git a/tests/topotests/pim_mrib/r1/frr.conf b/tests/topotests/pim_mrib/r1/frr.conf index 28cf2b2c46..7c9d27c60b 100644 --- a/tests/topotests/pim_mrib/r1/frr.conf +++ b/tests/topotests/pim_mrib/r1/frr.conf @@ -20,9 +20,10 @@ interface r1-eth1 ip forwarding ! ip route 10.0.2.0/24 10.0.0.2 50 -ip route 10.0.3.0/24 10.0.1.3 50 +ip route 10.0.3.0/24 10.0.0.2 50 ! router pim rpf-lookup-mode mrib-then-urib rp 10.0.0.1 224.0.0.0/4 + rp 10.0.1.1 225.0.0.0/24 !
\ No newline at end of file diff --git a/tests/topotests/pim_mrib/r2/frr.conf b/tests/topotests/pim_mrib/r2/frr.conf index 3e647f6795..260b6b0f72 100644 --- a/tests/topotests/pim_mrib/r2/frr.conf +++ b/tests/topotests/pim_mrib/r2/frr.conf @@ -25,4 +25,5 @@ ip route 10.0.3.0/24 10.0.2.4 50 router pim rpf-lookup-mode mrib-then-urib rp 10.0.0.1 224.0.0.0/4 + rp 10.0.1.1 225.0.0.0/24 !
\ No newline at end of file diff --git a/tests/topotests/pim_mrib/r3/frr.conf b/tests/topotests/pim_mrib/r3/frr.conf index 9815484d02..5966ae0e8c 100644 --- a/tests/topotests/pim_mrib/r3/frr.conf +++ b/tests/topotests/pim_mrib/r3/frr.conf @@ -25,4 +25,5 @@ ip route 10.0.2.0/24 10.0.3.4 50 router pim rpf-lookup-mode mrib-then-urib rp 10.0.0.1 224.0.0.0/4 + rp 10.0.1.1 225.0.0.0/24 !
\ No newline at end of file diff --git a/tests/topotests/pim_mrib/r4/frr.conf b/tests/topotests/pim_mrib/r4/frr.conf index 8432a7a350..8d9d8f7e2b 100644 --- a/tests/topotests/pim_mrib/r4/frr.conf +++ b/tests/topotests/pim_mrib/r4/frr.conf @@ -18,12 +18,24 @@ interface r4-eth1 ip igmp ip pim ! +interface r4-dum0 + ip address 10.10.0.4/24 + ip igmp + ip pim + ip pim passive +! ip forwarding ! ip route 10.0.0.0/24 10.0.2.2 50 -ip route 10.0.1.0/24 10.0.3.3 50 +ip route 10.0.1.0/24 10.0.2.2 50 +! +ip prefix-list SRCPLIST permit 10.0.0.1/32 +ip prefix-list SRCPLIST2 permit 10.0.1.1/32 +ip prefix-list GRPPLIST permit 239.1.1.1/32 +ip prefix-list GRPPLIST2 permit 239.2.2.2/32 ! router pim rpf-lookup-mode mrib-then-urib rp 10.0.0.1 224.0.0.0/4 + rp 10.0.1.1 225.0.0.0/24 !
\ No newline at end of file diff --git a/tests/topotests/pim_mrib/test_pim_mrib.py b/tests/topotests/pim_mrib/test_pim_mrib.py index 355c503e3b..2a391fa575 100644 --- a/tests/topotests/pim_mrib/test_pim_mrib.py +++ b/tests/topotests/pim_mrib/test_pim_mrib.py @@ -20,6 +20,8 @@ from lib.topogen import Topogen, get_topogen from lib.topolog import logger from lib.pim import ( verify_pim_rp_info, + verify_upstream_iif, + McastTesterHelper, ) from lib.common_config import step, write_test_header @@ -29,6 +31,8 @@ test_pim_mrib.py: Test PIM MRIB overrides and RPF modes TOPOLOGY = """ Test PIM MRIB overrides and RPF modes + Static routes installed that uses R2 to get between R1 and R4. + Tests will install MRIB override through R3 +---+---+ +---+---+ | | 10.0.0.0/24 | | @@ -42,7 +46,7 @@ TOPOLOGY = """ .3 | r3-eth0 r4-eth0 | .4 +---+---+ r3-eth1 r4-eth1 +---+---+ | | .3 .4 | | - + R3 +----------------------+ R4 | + + R3 +----------------------+ R4 |---r4-dum0 10.10.0.4/24 | | 10.0.3.0/24 | | +---+---+ +---+---+ """ @@ -54,9 +58,12 @@ sys.path.append(os.path.join(CWD, "../")) # Required to instantiate the topology builder class. pytestmark = [pytest.mark.pimd] +GROUP1 = "239.1.1.1" +GROUP2 = "239.2.2.2" + def build_topo(tgen): - '''Build function''' + """Build function""" # Create routers tgen.add_router("r1") @@ -70,6 +77,8 @@ def build_topo(tgen): tgen.add_link(tgen.gears["r2"], tgen.gears["r4"], "r2-eth1", "r4-eth0") tgen.add_link(tgen.gears["r3"], tgen.gears["r4"], "r3-eth1", "r4-eth1") + tgen.gears["r4"].run("ip link add r4-dum0 type dummy") + def setup_module(mod): logger.info("PIM MRIB/RPF functionality:\n {}".format(TOPOLOGY)) @@ -87,13 +96,13 @@ def setup_module(mod): def teardown_module(mod): - '''Teardown the pytest environment''' + """Teardown the pytest environment""" tgen = get_topogen() tgen.stop_topology() def test_pim_mrib_init(request): - '''Test boot in MRIB-than-URIB with the default MRIB''' + """Test boot in MRIB-than-URIB with the default MRIB""" tgen = get_topogen() tc_name = request.node.name write_test_header(tc_name) @@ -116,8 +125,23 @@ def test_pim_mrib_init(request): ) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + result = verify_pim_rp_info( + tgen, + None, + "r4", + "225.0.0.0/24", + "r4-eth0", + "10.0.1.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + def test_pim_mrib_override(request): - '''Test MRIB override nexthop''' + """Test MRIB override nexthop""" tgen = get_topogen() tc_name = request.node.name write_test_header(tc_name) @@ -128,10 +152,11 @@ def test_pim_mrib_override(request): # Install a MRIB route that has a shorter prefix length and lower cost. 
# In MRIB-than-URIB mode, it should use this route tgen.routers()["r4"].vtysh_cmd( - ''' + """ conf term ip mroute 10.0.0.0/16 10.0.3.3 25 - ''' + exit + """ ) step("Verify rp-info using MRIB nexthop") @@ -149,8 +174,23 @@ def test_pim_mrib_override(request): ) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + result = verify_pim_rp_info( + tgen, + None, + "r4", + "225.0.0.0/24", + "r4-eth1", + "10.0.1.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + def test_pim_mrib_prefix_mode(request): - '''Test longer prefix lookup mode''' + """Test longer prefix lookup mode""" tgen = get_topogen() tc_name = request.node.name write_test_header(tc_name) @@ -161,11 +201,13 @@ def test_pim_mrib_prefix_mode(request): # Switch to longer prefix match, should switch back to the URIB route # even with the lower cost, the longer prefix match will win because of the mode tgen.routers()["r4"].vtysh_cmd( - ''' + """ conf term router pim rpf-lookup-mode longer-prefix - ''' + exit + exit + """ ) step("Verify rp-info using URIB nexthop") @@ -183,8 +225,23 @@ def test_pim_mrib_prefix_mode(request): ) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + result = verify_pim_rp_info( + tgen, + None, + "r4", + "225.0.0.0/24", + "r4-eth0", + "10.0.1.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + def test_pim_mrib_dist_mode(request): - '''Test lower distance lookup mode''' + """Test lower distance lookup mode""" tgen = get_topogen() tc_name = request.node.name write_test_header(tc_name) @@ -194,11 +251,13 @@ def test_pim_mrib_dist_mode(request): # Switch to lower distance match, should switch back to the MRIB route tgen.routers()["r4"].vtysh_cmd( - ''' + """ conf term router pim rpf-lookup-mode lower-distance - ''' + exit + exit + """ ) step("Verify rp-info using MRIB nexthop") @@ -216,8 +275,23 @@ def test_pim_mrib_dist_mode(request): ) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + result = verify_pim_rp_info( + tgen, + None, + "r4", + "225.0.0.0/24", + "r4-eth1", + "10.0.1.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + def test_pim_mrib_urib_mode(request): - '''Test URIB only lookup mode''' + """Test URIB only lookup mode""" tgen = get_topogen() tc_name = request.node.name write_test_header(tc_name) @@ -227,11 +301,13 @@ def test_pim_mrib_urib_mode(request): # Switch to urib only match, should switch back to the URIB route tgen.routers()["r4"].vtysh_cmd( - ''' + """ conf term router pim rpf-lookup-mode urib-only - ''' + exit + exit + """ ) step("Verify rp-info using URIB nexthop") @@ -249,8 +325,23 @@ def test_pim_mrib_urib_mode(request): ) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + result = verify_pim_rp_info( + tgen, + None, + "r4", + "225.0.0.0/24", + "r4-eth0", + "10.0.1.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + def test_pim_mrib_mrib_mode(request): - '''Test MRIB only lookup mode''' + """Test MRIB only lookup mode""" tgen = get_topogen() tc_name = request.node.name write_test_header(tc_name) @@ -260,11 +351,13 @@ def test_pim_mrib_mrib_mode(request): # Switch to mrib only match, should switch back to the MRIB route 
tgen.routers()["r4"].vtysh_cmd( - ''' + """ conf term router pim rpf-lookup-mode mrib-only - ''' + exit + exit + """ ) step("Verify rp-info using MRIB nexthop") @@ -282,8 +375,23 @@ def test_pim_mrib_mrib_mode(request): ) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + result = verify_pim_rp_info( + tgen, + None, + "r4", + "225.0.0.0/24", + "r4-eth1", + "10.0.1.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + def test_pim_mrib_mrib_mode_no_route(request): - '''Test MRIB only with no route''' + """Test MRIB only with no route""" tgen = get_topogen() tc_name = request.node.name write_test_header(tc_name) @@ -293,10 +401,11 @@ def test_pim_mrib_mrib_mode_no_route(request): # Remove the MRIB route, in mrib-only mode, it should switch to no path for the RP tgen.routers()["r4"].vtysh_cmd( - ''' + """ conf term no ip mroute 10.0.0.0/16 10.0.3.3 25 - ''' + exit + """ ) step("Verify rp-info with Unknown next hop") @@ -314,8 +423,818 @@ def test_pim_mrib_mrib_mode_no_route(request): ) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + result = verify_pim_rp_info( + tgen, + None, + "r4", + "225.0.0.0/24", + "Unknown", + "10.0.1.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + +def test_pim_mrib_rpf_lookup_source_list_init(request): + """Test RPF lookup source list with initial setup""" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # Reset back to mrib then urib mode + # Also add mode using SRCPLIST(10.0.0.1) and SRCPLIST2(10.0.1.1) + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + router pim + rpf-lookup-mode mrib-then-urib + rpf-lookup-mode mrib-then-urib source-list SRCPLIST + rpf-lookup-mode mrib-then-urib source-list SRCPLIST2 + exit + exit + """ + ) + + step("Verify rp-info with default next hop") + result = verify_pim_rp_info( + tgen, + None, + "r4", + "224.0.0.0/4", + "r4-eth0", + "10.0.0.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + result = verify_pim_rp_info( + tgen, + None, + "r4", + "225.0.0.0/24", + "r4-eth0", + "10.0.1.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + +def test_pim_mrib_rpf_lookup_source_list_add_mroute(request): + """Test RPF lookup source list with MRIB route on alternate path""" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # Add a MRIB route through r4-eth1 that is better distance but worse prefix + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + ip mroute 10.0.0.0/16 10.0.3.3 25 + exit + """ + ) + + step("Verify rp-info with MRIB next hop") + result = verify_pim_rp_info( + tgen, + None, + "r4", + "224.0.0.0/4", + "r4-eth1", + "10.0.0.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + result = verify_pim_rp_info( + tgen, + None, + "r4", + "225.0.0.0/24", + "r4-eth1", + "10.0.1.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + +def 
test_pim_mrib_rpf_lookup_source_list_src1_prefix_mode(request): + """Test RPF lookup source list src1 longer prefix mode""" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # Switch just source 1 to longest prefix + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + router pim + rpf-lookup-mode longer-prefix source-list SRCPLIST + exit + exit + """ + ) + + step("Verify rp-info with URIB next hop for source 1 and MRIB for source 2") + result = verify_pim_rp_info( + tgen, + None, + "r4", + "224.0.0.0/4", + "r4-eth0", + "10.0.0.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + result = verify_pim_rp_info( + tgen, + None, + "r4", + "225.0.0.0/24", + "r4-eth1", + "10.0.1.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + +def test_pim_mrib_rpf_lookup_source_list_src1_dist_src2_prefix_mode(request): + """Test RPF lookup source list src1 lower distance mode and src2 longer prefix mode""" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # Switch source 1 to shortest distance, source 2 to longest prefix + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + router pim + rpf-lookup-mode lower-distance source-list SRCPLIST + rpf-lookup-mode longer-prefix source-list SRCPLIST2 + exit + exit + """ + ) + + step("Verify rp-info with MRIB next hop for source 1 and URIB for source 2") + result = verify_pim_rp_info( + tgen, + None, + "r4", + "224.0.0.0/4", + "r4-eth1", + "10.0.0.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + result = verify_pim_rp_info( + tgen, + None, + "r4", + "225.0.0.0/24", + "r4-eth0", + "10.0.1.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + +def test_pim_mrib_rpf_lookup_source_list_src1_urib_src2_dist_mode(request): + """Test RPF lookup source list src1 urib mode and src2 lower distance mode""" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # Switch source 1 to urib only, source 2 to shorter distance + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + router pim + rpf-lookup-mode urib-only source-list SRCPLIST + rpf-lookup-mode lower-distance source-list SRCPLIST2 + exit + exit + """ + ) + + step("Verify rp-info with URIB next hop for source 1 and MRIB for source 2") + result = verify_pim_rp_info( + tgen, + None, + "r4", + "224.0.0.0/4", + "r4-eth0", + "10.0.0.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + result = verify_pim_rp_info( + tgen, + None, + "r4", + "225.0.0.0/24", + "r4-eth1", + "10.0.1.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + +def test_pim_mrib_rpf_lookup_source_list_src1_mrib_src2_urib_mode(request): + """Test RPF lookup source list src1 mrib mode and src2 urib mode""" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if 
tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # Switch source 1 to mrib only, source 2 to urib only + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + router pim + rpf-lookup-mode mrib-only source-list SRCPLIST + rpf-lookup-mode urib-only source-list SRCPLIST2 + exit + exit + """ + ) + + step("Verify rp-info with MRIB next hop for source 1 and URIB for source 2") + result = verify_pim_rp_info( + tgen, + None, + "r4", + "224.0.0.0/4", + "r4-eth1", + "10.0.0.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + result = verify_pim_rp_info( + tgen, + None, + "r4", + "225.0.0.0/24", + "r4-eth0", + "10.0.1.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + +def test_pim_mrib_rpf_lookup_source_list_removed(request): + """Test RPF lookup source list removed""" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # Remove both special modes, both should switch to MRIB route + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + router pim + no rpf-lookup-mode mrib-only source-list SRCPLIST + no rpf-lookup-mode urib-only source-list SRCPLIST2 + exit + exit + """ + ) + + step("Verify rp-info with MRIB next hop for both sources") + result = verify_pim_rp_info( + tgen, + None, + "r4", + "224.0.0.0/4", + "r4-eth1", + "10.0.0.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + result = verify_pim_rp_info( + tgen, + None, + "r4", + "225.0.0.0/24", + "r4-eth1", + "10.0.1.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + +def test_pim_mrib_rpf_lookup_source_list_del_mroute(request): + """Test RPF lookup source list delete mroute""" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # Remove the MRIB route, both should switch to URIB + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + no ip mroute 10.0.0.0/16 10.0.3.3 25 + exit + """ + ) + + step("Verify rp-info with URIB next hop for both sources") + result = verify_pim_rp_info( + tgen, + None, + "r4", + "224.0.0.0/4", + "r4-eth0", + "10.0.0.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + result = verify_pim_rp_info( + tgen, + None, + "r4", + "225.0.0.0/24", + "r4-eth0", + "10.0.1.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + +def test_pim_mrib_rpf_lookup_group_list(request): + """Test RPF lookup group list""" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + with McastTesterHelper(tgen) as apphelper: + step( + ("Send multicast traffic from R1 to dense groups {}, {}").format( + GROUP1, GROUP2 + ) + ) + result = apphelper.run_traffic("r1", [GROUP1, GROUP2], bind_intf="r1-eth1") + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + # Reset back to mrib then urib mode + # Also add mode using GRPPLIST(239.1.1.1) and 
GRPPLIST2(239.2.2.2) + # And do an igmp join to both groups on r4-eth2 + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + router pim + rpf-lookup-mode mrib-then-urib + rpf-lookup-mode mrib-then-urib group-list GRPPLIST + rpf-lookup-mode mrib-then-urib group-list GRPPLIST2 + exit + int r4-dum0 + ip igmp join-group {} + ip igmp join-group {} + exit + exit + """.format( + GROUP1, GROUP2 + ) + ) + + step("Verify upstream iif with default next hop") + result = verify_upstream_iif(tgen, "r4", "r4-eth0", "10.0.1.1", GROUP1) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif(tgen, "r4", "r4-eth0", "10.0.1.1", GROUP2) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("Add MRIB route through alternate path") + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + ip mroute 10.0.0.0/16 10.0.3.3 25 + exit + """ + ) + + step("Verify upstream iif with alternate next hop") + result = verify_upstream_iif(tgen, "r4", "r4-eth1", "10.0.1.1", GROUP1) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif(tgen, "r4", "r4-eth1", "10.0.1.1", GROUP2) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("Switch group1 to longer prefix match (URIB)") + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + router pim + rpf-lookup-mode longer-prefix group-list GRPPLIST + exit + exit + """.format( + GROUP1, GROUP2 + ) + ) + + step("Verify upstream iif of group1 is URIB, group2 is MRIB") + result = verify_upstream_iif(tgen, "r4", "r4-eth0", "10.0.1.1", GROUP1) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif(tgen, "r4", "r4-eth1", "10.0.1.1", GROUP2) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Switch group1 to lower distance match (MRIB), and group2 to longer prefix (URIB)" + ) + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + router pim + rpf-lookup-mode lower-distance group-list GRPPLIST + rpf-lookup-mode longer-prefix group-list GRPPLIST2 + exit + exit + """.format( + GROUP1, GROUP2 + ) + ) + + step("Verify upstream iif of group1 is MRIB, group2 is URIB") + result = verify_upstream_iif(tgen, "r4", "r4-eth1", "10.0.1.1", GROUP1) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif(tgen, "r4", "r4-eth0", "10.0.1.1", GROUP2) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("Switch group1 to urib match only, and group2 to lower distance (URIB)") + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + router pim + rpf-lookup-mode urib-only group-list GRPPLIST + rpf-lookup-mode lower-distance group-list GRPPLIST2 + exit + exit + """.format( + GROUP1, GROUP2 + ) + ) + + step("Verify upstream iif of group1 is URIB, group2 is MRIB") + result = verify_upstream_iif(tgen, "r4", "r4-eth0", "10.0.1.1", GROUP1) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif(tgen, "r4", "r4-eth1", "10.0.1.1", GROUP2) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("Switch group1 to mrib match only, and group2 to urib match only") + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + router pim + rpf-lookup-mode mrib-only group-list GRPPLIST + 
rpf-lookup-mode urib-only group-list GRPPLIST2 + exit + exit + """.format( + GROUP1, GROUP2 + ) + ) + + step("Verify upstream iif of group1 is MRIB, group2 is URIB") + result = verify_upstream_iif(tgen, "r4", "r4-eth1", "10.0.1.1", GROUP1) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif(tgen, "r4", "r4-eth0", "10.0.1.1", GROUP2) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("Delete MRIB route") + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + no ip mroute 10.0.0.0/16 10.0.3.3 25 + exit + """.format( + GROUP1, GROUP2 + ) + ) + + step("Verify upstream iif of group1 is Unknown, group2 is URIB") + result = verify_upstream_iif( + tgen, "r4", "Unknown", "10.0.1.1", GROUP1, "NotJoined" + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif(tgen, "r4", "r4-eth0", "10.0.1.1", GROUP2) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + +def test_pim_mrib_rpf_lookup_source_group_lists(request): + """Test RPF lookup source and group lists""" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + with McastTesterHelper(tgen) as apphelper: + step( + ("Send multicast traffic from R1 to dense groups {}, {}").format( + GROUP1, GROUP2 + ) + ) + result = apphelper.run_traffic("r1", [GROUP1, GROUP2], bind_intf="r1-eth1") + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + # Reset back to mrib then urib mode + # Also add mode using GRPPLIST(239.1.1.1) and GRPPLIST2(239.2.2.2), both using SRCPLIST2 + # And do an igmp join to both groups on r4-eth2 + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + router pim + rpf-lookup-mode mrib-then-urib + rpf-lookup-mode mrib-then-urib group-list GRPPLIST source-list SRCPLIST2 + rpf-lookup-mode mrib-then-urib group-list GRPPLIST2 source-list SRCPLIST2 + exit + int r4-dum0 + ip igmp join-group {} + ip igmp join-group {} + exit + exit + """.format( + GROUP1, GROUP2 + ) + ) + + step("Verify upstream iif with default next hop") + result = verify_upstream_iif(tgen, "r4", "r4-eth0", "10.0.1.1", GROUP1) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif(tgen, "r4", "r4-eth0", "10.0.1.1", GROUP2) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("Add MRIB route through alternate path") + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + ip mroute 10.0.0.0/16 10.0.3.3 25 + exit + """ + ) + + step("Verify upstream iif with alternate next hop") + result = verify_upstream_iif(tgen, "r4", "r4-eth1", "10.0.1.1", GROUP1) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif(tgen, "r4", "r4-eth1", "10.0.1.1", GROUP2) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("Switch group1 to longer prefix match (URIB)") + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + router pim + rpf-lookup-mode longer-prefix group-list GRPPLIST source-list SRCPLIST2 + exit + exit + """.format( + GROUP1, GROUP2 + ) + ) + + step("Verify upstream iif of group1 is URIB, group2 is MRIB") + result = verify_upstream_iif(tgen, "r4", "r4-eth0", "10.0.1.1", GROUP1) + assert result is 
True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif(tgen, "r4", "r4-eth1", "10.0.1.1", GROUP2) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Switch group1 to lower distance match (MRIB), and group2 to longer prefix (URIB)" + ) + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + router pim + rpf-lookup-mode lower-distance group-list GRPPLIST source-list SRCPLIST2 + rpf-lookup-mode longer-prefix group-list GRPPLIST2 source-list SRCPLIST2 + exit + exit + """.format( + GROUP1, GROUP2 + ) + ) + + step("Verify upstream iif of group1 is MRIB, group2 is URIB") + result = verify_upstream_iif(tgen, "r4", "r4-eth1", "10.0.1.1", GROUP1) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif(tgen, "r4", "r4-eth0", "10.0.1.1", GROUP2) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("Switch group1 to urib match only, and group2 to lower distance (URIB)") + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + router pim + rpf-lookup-mode urib-only group-list GRPPLIST source-list SRCPLIST2 + rpf-lookup-mode lower-distance group-list GRPPLIST2 source-list SRCPLIST2 + exit + exit + """.format( + GROUP1, GROUP2 + ) + ) + + step("Verify upstream iif of group1 is URIB, group2 is MRIB") + result = verify_upstream_iif(tgen, "r4", "r4-eth0", "10.0.1.1", GROUP1) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif(tgen, "r4", "r4-eth1", "10.0.1.1", GROUP2) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("Switch group1 to mrib match only, and group2 to urib match only") + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + router pim + rpf-lookup-mode mrib-only group-list GRPPLIST source-list SRCPLIST2 + rpf-lookup-mode urib-only group-list GRPPLIST2 source-list SRCPLIST2 + exit + exit + """.format( + GROUP1, GROUP2 + ) + ) + + step("Verify upstream iif of group1 is MRIB, group2 is URIB") + result = verify_upstream_iif(tgen, "r4", "r4-eth1", "10.0.1.1", GROUP1) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif(tgen, "r4", "r4-eth0", "10.0.1.1", GROUP2) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("Delete MRIB route") + tgen.routers()["r4"].vtysh_cmd( + """ + conf term + no ip mroute 10.0.0.0/16 10.0.3.3 25 + exit + """.format( + GROUP1, GROUP2 + ) + ) + + step("Verify upstream iif of group1 is Unknown, group2 is URIB") + result = verify_upstream_iif( + tgen, "r4", "Unknown", "10.0.1.1", GROUP1, "NotJoined" + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif(tgen, "r4", "r4-eth0", "10.0.1.1", GROUP2) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + def test_memory_leak(): - '''Run the memory leak test and report results.''' + """Run the memory leak test and report results.""" tgen = get_topogen() if not tgen.is_memleak_enabled(): pytest.skip("Memory leak test/report is disabled") diff --git a/tests/topotests/static_simple/test_static_simple.py b/tests/topotests/static_simple/test_static_simple.py index bb3580a1d8..afde58fbf7 100644 --- a/tests/topotests/static_simple/test_static_simple.py +++ 
b/tests/topotests/static_simple/test_static_simple.py @@ -61,6 +61,15 @@ def get_ip_networks(super_prefix, count): return tuple(network.subnets(count_log2))[0:count] +def get_src_networks(src_prefix, count, default=""): + if src_prefix is not None: + for net in get_ip_networks(src_prefix, count): + yield " from {}".format(net) + else: + for i in range(0, count): + yield default + + def enable_debug(router): router.vtysh_cmd("debug northbound callbacks configuration") @@ -70,7 +79,7 @@ def disable_debug(router): @retry(retry_timeout=30, initial_wait=0.1) -def check_kernel(r1, super_prefix, count, add, is_blackhole, vrf, matchvia): +def check_kernel(r1, super_prefix, src_prefix, count, add, is_blackhole, vrf, matchvia): network = ipaddress.ip_network(super_prefix) vrfstr = f" vrf {vrf}" if vrf else "" if network.version == 6: @@ -79,26 +88,30 @@ def check_kernel(r1, super_prefix, count, add, is_blackhole, vrf, matchvia): kernel = r1.run(f"ip -4 route show{vrfstr}") logger.debug("checking kernel routing table%s:\n%s", vrfstr, kernel) - for _, net in enumerate(get_ip_networks(super_prefix, count)): + for net, srcnet in zip( + get_ip_networks(super_prefix, count), get_src_networks(src_prefix, count) + ): + netfull = str(net) + srcnet if not add: - assert str(net) not in kernel + assert netfull + " nhid" not in kernel + assert netfull + " via" not in kernel continue if is_blackhole: - route = f"blackhole {str(net)} proto (static|196) metric 20" + route = f"blackhole {netfull}(?: dev lo)? proto (static|196) metric 20" else: route = ( - f"{str(net)}(?: nhid [0-9]+)? {matchvia} " - "proto (static|196) metric 20" + f"{netfull}(?: nhid [0-9]+)? {matchvia} proto (static|196) metric 20" ) assert re.search(route, kernel), f"Failed to find \n'{route}'\n in \n'{kernel}'" -def do_config( +def do_config_inner( r1, count, add=True, do_ipv6=False, + do_sadr=False, via=None, vrf=None, use_cli=False, @@ -109,11 +122,18 @@ def do_config( # # Set the route details # - - if vrf: - super_prefix = "2002::/48" if do_ipv6 else "20.0.0.0/8" + src_prefs = [None, None] + if do_ipv6 and do_sadr: + # intentionally using overlapping prefix + super_prefs = ["2001::/48", "2002::/48"] + src_prefs = ["2001:db8:1111::/48", "2001:db8:2222::/48"] + elif do_ipv6: + super_prefs = ["2001::/48", "2002::/48"] else: - super_prefix = "2001::/48" if do_ipv6 else "10.0.0.0/8" + super_prefs = ["10.0.0.0/8", "20.0.0.0/8"] + + super_prefix = super_prefs[1 if vrf else 0] + src_prefix = src_prefs[1 if vrf else 0] matchvia = "" if via == "blackhole": @@ -144,11 +164,13 @@ def do_config( if vrf: f.write("vrf {}\n".format(vrf)) - for _, net in enumerate(get_ip_networks(super_prefix, count)): + for net, srcnet in zip( + get_ip_networks(super_prefix, count), get_src_networks(src_prefix, count) + ): if add: - f.write("ip route {} {}\n".format(net, via)) + f.write("ip route {}{} {}\n".format(net, srcnet, via)) else: - f.write("no ip route {} {}\n".format(net, via)) + f.write("no ip route {}{} {}\n".format(net, srcnet, via)) # # Load config file. 
@@ -165,7 +187,9 @@ def do_config( # # Verify the results are in the kernel # - check_kernel(r1, super_prefix, count, add, via == "blackhole", vrf, matchvia) + check_kernel( + r1, super_prefix, src_prefix, count, add, via == "blackhole", vrf, matchvia + ) optyped = "added" if add else "removed" logger.debug( @@ -175,6 +199,12 @@ def do_config( ) +def do_config(*args, **kwargs): + do_config_inner(*args, do_ipv6=False, do_sadr=False, **kwargs) + do_config_inner(*args, do_ipv6=True, do_sadr=False, **kwargs) + do_config_inner(*args, do_ipv6=True, do_sadr=True, **kwargs) + + def guts(tgen, vrf, use_cli): if tgen.routers_have_failure(): pytest.skip(tgen.errors) @@ -183,20 +213,20 @@ def guts(tgen, vrf, use_cli): count = 10 step(f"add {count} via gateway", reset=True) - do_config(r1, count, True, False, vrf=vrf, use_cli=use_cli) + do_config(r1, count, True, vrf=vrf, use_cli=use_cli) step(f"remove {count} via gateway") - do_config(r1, count, False, False, vrf=vrf, use_cli=use_cli) + do_config(r1, count, False, vrf=vrf, use_cli=use_cli) via = f"lo-{vrf}" if vrf else "lo" step("add via loopback") - do_config(r1, 1, True, False, via=via, vrf=vrf, use_cli=use_cli) + do_config(r1, 1, True, via=via, vrf=vrf, use_cli=use_cli) step("remove via loopback") - do_config(r1, 1, False, False, via=via, vrf=vrf, use_cli=use_cli) + do_config(r1, 1, False, via=via, vrf=vrf, use_cli=use_cli) step("add via blackhole") - do_config(r1, 1, True, False, via="blackhole", vrf=vrf, use_cli=use_cli) + do_config(r1, 1, True, via="blackhole", vrf=vrf, use_cli=use_cli) step("remove via blackhole") - do_config(r1, 1, False, False, via="blackhole", vrf=vrf, use_cli=use_cli) + do_config(r1, 1, False, via="blackhole", vrf=vrf, use_cli=use_cli) def test_static_cli(tgen): diff --git a/tests/topotests/static_srv6_sids/__init__.py b/tests/topotests/static_srv6_sids/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/static_srv6_sids/__init__.py diff --git a/tests/topotests/static_srv6_sids/expected_srv6_sids.json b/tests/topotests/static_srv6_sids/expected_srv6_sids.json new file mode 100644 index 0000000000..5799d97988 --- /dev/null +++ b/tests/topotests/static_srv6_sids/expected_srv6_sids.json @@ -0,0 +1,142 @@ +{ + "fcbb:bbbb:1::/48": [ + { + "prefix": "fcbb:bbbb:1::/48", + "prefixLen": 48, + "protocol": "static", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 1, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "sr0", + "active": true, + "weight": 1, + "seg6local": { + "action": "End" + }, + "seg6localContext": { + + } + } + ] + } + ], + "fcbb:bbbb:1:fe10::/64": [ + { + "prefix": "fcbb:bbbb:1:fe10::/64", + "prefixLen": 64, + "protocol": "static", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 1, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "Vrf10", + "active": true, + "weight": 1, + "seg6local": { + "action": "End.DT4" + }, + "seg6localContext": { + "table": 10 + } + } + ] + } + ], + "fcbb:bbbb:1:fe20::/64": [ + { + "prefix": "fcbb:bbbb:1:fe20::/64", + "prefixLen": 64, + "protocol": 
"static", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 1, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "Vrf20", + "active": true, + "weight": 1, + "seg6local": { + "action": "End.DT6" + }, + "seg6localContext": { + "table": 20 + } + } + ] + } + ], + "fcbb:bbbb:1:fe30::/64": [ + { + "prefix": "fcbb:bbbb:1:fe30::/64", + "prefixLen": 64, + "protocol": "static", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 1, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "Vrf30", + "active": true, + "weight": 1, + "seg6local": { + "action": "End.DT46" + }, + "seg6localContext": { + "table": 30 + } + } + ] + } + ] +}
\ No newline at end of file diff --git a/tests/topotests/static_srv6_sids/expected_srv6_sids_sid_delete_1.json b/tests/topotests/static_srv6_sids/expected_srv6_sids_sid_delete_1.json new file mode 100644 index 0000000000..e1a2a16afe --- /dev/null +++ b/tests/topotests/static_srv6_sids/expected_srv6_sids_sid_delete_1.json @@ -0,0 +1,107 @@ +{ + "fcbb:bbbb:1:fe10::/64": [ + { + "prefix": "fcbb:bbbb:1:fe10::/64", + "prefixLen": 64, + "protocol": "static", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 1, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "Vrf10", + "active": true, + "weight": 1, + "seg6local": { + "action": "End.DT4" + }, + "seg6localContext": { + "table": 10 + } + } + ] + } + ], + "fcbb:bbbb:1:fe20::/64": [ + { + "prefix": "fcbb:bbbb:1:fe20::/64", + "prefixLen": 64, + "protocol": "static", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 1, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "Vrf20", + "active": true, + "weight": 1, + "seg6local": { + "action": "End.DT6" + }, + "seg6localContext": { + "table": 20 + } + } + ] + } + ], + "fcbb:bbbb:1:fe30::/64": [ + { + "prefix": "fcbb:bbbb:1:fe30::/64", + "prefixLen": 64, + "protocol": "static", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 1, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "Vrf30", + "active": true, + "weight": 1, + "seg6local": { + "action": "End.DT46" + }, + "seg6localContext": { + "table": 30 + } + } + ] + } + ] +}
\ No newline at end of file diff --git a/tests/topotests/static_srv6_sids/expected_srv6_sids_sid_delete_2.json b/tests/topotests/static_srv6_sids/expected_srv6_sids_sid_delete_2.json new file mode 100644 index 0000000000..b5801d354b --- /dev/null +++ b/tests/topotests/static_srv6_sids/expected_srv6_sids_sid_delete_2.json @@ -0,0 +1,72 @@ +{ + "fcbb:bbbb:1:fe10::/64": [ + { + "prefix": "fcbb:bbbb:1:fe10::/64", + "prefixLen": 64, + "protocol": "static", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 1, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "Vrf10", + "active": true, + "weight": 1, + "seg6local": { + "action": "End.DT4" + }, + "seg6localContext": { + "table": 10 + } + } + ] + } + ], + "fcbb:bbbb:1:fe30::/64": [ + { + "prefix": "fcbb:bbbb:1:fe30::/64", + "prefixLen": 64, + "protocol": "static", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 1, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "Vrf30", + "active": true, + "weight": 1, + "seg6local": { + "action": "End.DT46" + }, + "seg6localContext": { + "table": 30 + } + } + ] + } + ] +}
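
Taken together, the two deletion fixtures are strict subsets of expected_srv6_sids.json: expected_srv6_sids_sid_delete_1.json drops the uN SID fcbb:bbbb:1::/48, which the test removes first with "no sid fcbb:bbbb:1::/48", and expected_srv6_sids_sid_delete_2.json additionally drops fcbb:bbbb:1:fe20::/64, removed by the second "no sid" step (see test_static_srv6_sids.py further below). A small sketch, assuming the three fixture files are readable from the test directory, that makes the relationship explicit:

import json

def top_level_prefixes(path):
    # The fixtures are keyed by SID prefix at the top level.
    with open(path, "r") as f:
        return set(json.load(f))

base = top_level_prefixes("expected_srv6_sids.json")
after_first_delete = top_level_prefixes("expected_srv6_sids_sid_delete_1.json")
after_second_delete = top_level_prefixes("expected_srv6_sids_sid_delete_2.json")

# Step 1 of the sid-delete test removes only the uN SID ...
assert base - after_first_delete == {"fcbb:bbbb:1::/48"}
# ... and step 2 additionally removes the uDT6 SID bound to Vrf20.
assert after_first_delete - after_second_delete == {"fcbb:bbbb:1:fe20::/64"}
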
\ No newline at end of file diff --git a/tests/topotests/static_srv6_sids/r1/frr.conf b/tests/topotests/static_srv6_sids/r1/frr.conf new file mode 100644 index 0000000000..b4904d9ac2 --- /dev/null +++ b/tests/topotests/static_srv6_sids/r1/frr.conf @@ -0,0 +1,17 @@ +hostname r1 +! +segment-routing + srv6 + locators + locator MAIN + prefix fcbb:bbbb:1::/48 block-len 32 node-len 16 func-bits 16 + ! + ! + static-sids + sid fcbb:bbbb:1::/48 locator MAIN behavior uN + sid fcbb:bbbb:1:fe10::/64 locator MAIN behavior uDT4 vrf Vrf10 + sid fcbb:bbbb:1:fe20::/64 locator MAIN behavior uDT6 vrf Vrf20 + sid fcbb:bbbb:1:fe30::/64 locator MAIN behavior uDT46 vrf Vrf30 + ! + ! +!
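
The SIDs here are configured with the NEXT-CSID (uSID) behavior keywords, while the expected_srv6_sids*.json fixtures above check for the corresponding seg6local action strings in zebra's output. The pairing below is only a restatement of what this patch's config and fixtures encode (the dict name is mine, introduced for illustration), kept as a sketch for readers following the behavior names across the files:

# "sid ... behavior X" keyword in frr.conf  ->  seg6local action expected in
# "show ipv6 route static json" per the fixtures in this test.
BEHAVIOR_TO_SEG6LOCAL_ACTION = {
    "uN": "End",          # fcbb:bbbb:1::/48, out-interface sr0
    "uDT4": "End.DT4",    # fcbb:bbbb:1:fe10::/64, VRF Vrf10 (table 10)
    "uDT6": "End.DT6",    # fcbb:bbbb:1:fe20::/64, VRF Vrf20 (table 20)
    "uDT46": "End.DT46",  # fcbb:bbbb:1:fe30::/64, VRF Vrf30 (table 30)
}
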
\ No newline at end of file diff --git a/tests/topotests/static_srv6_sids/r1/setup.sh b/tests/topotests/static_srv6_sids/r1/setup.sh new file mode 100644 index 0000000000..040be73914 --- /dev/null +++ b/tests/topotests/static_srv6_sids/r1/setup.sh @@ -0,0 +1,13 @@ +ip link add sr0 type dummy +ip link set sr0 up + +ip link add Vrf10 type vrf table 10 +ip link set Vrf10 up + +ip link add Vrf20 type vrf table 20 +ip link set Vrf20 up + +ip link add Vrf30 type vrf table 30 +ip link set Vrf30 up + +sysctl -w net.vrf.strict_mode=1 diff --git a/tests/topotests/static_srv6_sids/test_static_srv6_sids.py b/tests/topotests/static_srv6_sids/test_static_srv6_sids.py new file mode 100755 index 0000000000..cdcc6fd29e --- /dev/null +++ b/tests/topotests/static_srv6_sids/test_static_srv6_sids.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# test_static_srv6_sids.py +# +# Copyright (c) 2025 by +# Alibaba Inc, Yuqing Zhao <galadriel.zyq@alibaba-inc.com> +# Lingyu Zhang <hanyu.zly@alibaba-inc.com> +# + +""" +test_static_srv6_sids.py: +Test for SRv6 static route on zebra +""" + +import os +import sys +import json +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +pytestmark = [pytest.mark.staticd] + + +def open_json_file(filename): + try: + with open(filename, "r") as f: + return json.load(f) + except IOError: + assert False, "Could not read file {}".format(filename) + + +def setup_module(mod): + tgen = Topogen({None: "r1"}, mod.__name__) + tgen.start_topology() + for rname, router in tgen.routers().items(): + router.run("/bin/bash {}/{}/setup.sh".format(CWD, rname)) + router.load_frr_config("frr.conf") + tgen.start_router() + + +def teardown_module(): + tgen = get_topogen() + tgen.stop_topology() + + +def test_srv6_static_sids(): + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + router = tgen.gears["r1"] + + def _check_srv6_static_sids(router, expected_route_file): + logger.info("checking zebra srv6 static sids") + output = json.loads(router.vtysh_cmd("show ipv6 route static json")) + expected = open_json_file("{}/{}".format(CWD, expected_route_file)) + return topotest.json_cmp(output, expected) + + def check_srv6_static_sids(router, expected_file): + func = functools.partial(_check_srv6_static_sids, router, expected_file) + _, result = topotest.run_and_expect(func, None, count=15, wait=1) + assert result is None, "Failed" + + # FOR DEVELOPER: + # If you want to stop some specific line and start interactive shell, + # please use tgen.mininet_cli() to start it. 
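
The two helpers just defined poll zebra with topotest.run_and_expect until topotest.json_cmp reports a match (None) against the fixture; as far as I can tell, json_cmp treats the fixture as a subset to look for in the live output, so extra fields such as uptime counters do not break the comparison. Purely to illustrate that subset idea, here is a toy matcher; it is an editorial sketch and deliberately much simpler than the real json_cmp in tests/topotests/lib/topotest.py (in particular, real list handling differs):

def subset_match(actual, expected):
    """Toy illustration: every key/value in `expected` must appear in
    `actual`; dicts recurse, lists compare the first len(expected) items
    positionally, everything else compares by equality."""
    if isinstance(expected, dict):
        return isinstance(actual, dict) and all(
            key in actual and subset_match(actual[key], value)
            for key, value in expected.items()
        )
    if isinstance(expected, list):
        return (
            isinstance(actual, list)
            and len(actual) >= len(expected)
            and all(subset_match(a, e) for a, e in zip(actual, expected))
        )
    return actual == expected

# A live route entry with extra fields still satisfies the fixture entry:
assert subset_match(
    {"prefix": "fcbb:bbbb:1::/48", "installed": True, "uptime": "00:00:05"},
    {"prefix": "fcbb:bbbb:1::/48", "installed": True},
)
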
+ + logger.info("Test for srv6 sids configuration") + check_srv6_static_sids(router, "expected_srv6_sids.json") + + +def test_srv6_static_sids_sid_delete(): + """ + Remove the static SID and verify it gets removed + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + router = tgen.gears["r1"] + + def _check_srv6_static_sids(router, expected_route_file): + logger.info("checking zebra srv6 static sids") + output = json.loads(router.vtysh_cmd("show ipv6 route static json")) + expected = open_json_file("{}/{}".format(CWD, expected_route_file)) + return topotest.json_cmp(output, expected) + + def check_srv6_static_sids(router, expected_file): + func = functools.partial(_check_srv6_static_sids, router, expected_file) + _, result = topotest.run_and_expect(func, None, count=15, wait=1) + assert result is None, "Failed" + + router.vtysh_cmd( + """ + configure terminal + segment-routing + srv6 + static-sids + no sid fcbb:bbbb:1::/48 + """ + ) + + # FOR DEVELOPER: + # If you want to stop some specific line and start interactive shell, + # please use tgen.mininet_cli() to start it. + + logger.info("Test for srv6 sids configuration") + check_srv6_static_sids(router, "expected_srv6_sids_sid_delete_1.json") + + router.vtysh_cmd( + """ + configure terminal + segment-routing + srv6 + static-sids + no sid fcbb:bbbb:1:fe20::/64 locator MAIN behavior uDT6 vrf Vrf20 + """ + ) + + # FOR DEVELOPER: + # If you want to stop some specific line and start interactive shell, + # please use tgen.mininet_cli() to start it. + + logger.info("Test for srv6 sids configuration") + check_srv6_static_sids(router, "expected_srv6_sids_sid_delete_2.json") + + +def test_srv6_static_sids_sid_readd(): + """ + Re-add the static SID and verify the routing table + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + router = tgen.gears["r1"] + + def _check_srv6_static_sids(router, expected_route_file): + logger.info("checking zebra srv6 static sids") + output = json.loads(router.vtysh_cmd("show ipv6 route static json")) + expected = open_json_file("{}/{}".format(CWD, expected_route_file)) + return topotest.json_cmp(output, expected) + + def check_srv6_static_sids(router, expected_file): + func = functools.partial(_check_srv6_static_sids, router, expected_file) + _, result = topotest.run_and_expect(func, None, count=15, wait=1) + assert result is None, "Failed" + + router.vtysh_cmd( + """ + configure terminal + segment-routing + srv6 + static-sids + sid fcbb:bbbb:1::/48 locator MAIN behavior uN + sid fcbb:bbbb:1:fe20::/64 locator MAIN behavior uDT6 vrf Vrf20 + """ + ) + + # FOR DEVELOPER: + # If you want to stop some specific line and start interactive shell, + # please use tgen.mininet_cli() to start it. 
+ + logger.info("Test for srv6 sids configuration") + check_srv6_static_sids(router, "expected_srv6_sids.json") + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/v6_nexthop_group_recursive_resolution/r1/frr.conf b/tests/topotests/v6_nexthop_group_recursive_resolution/r1/frr.conf new file mode 100644 index 0000000000..f4da11af06 --- /dev/null +++ b/tests/topotests/v6_nexthop_group_recursive_resolution/r1/frr.conf @@ -0,0 +1,4 @@ +int r1-eth0 + ipv6 address fc00::1/64 + +ipv6 route 1::1/128 fc00::2 diff --git a/tests/topotests/v6_nexthop_group_recursive_resolution/test_v6_nexthop_group_recursive_resolution.py b/tests/topotests/v6_nexthop_group_recursive_resolution/test_v6_nexthop_group_recursive_resolution.py new file mode 100644 index 0000000000..587a951c85 --- /dev/null +++ b/tests/topotests/v6_nexthop_group_recursive_resolution/test_v6_nexthop_group_recursive_resolution.py @@ -0,0 +1,189 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# Copyright (c) 2024 by Nvidia Corporation +# Donald Sharp +# + +""" +Check that the v6 nexthop recursive resolution works when it changes +""" + +import os +import sys +import json +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.common_config import step + +pytestmark = [pytest.mark.staticd] + + +def build_topo(tgen): + + tgen.add_router("r1") + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + + +def setup_module(mod): + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for rname, router in router_list.items(): + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)), + [(TopoRouter.RD_MGMTD, None), + (TopoRouter.RD_ZEBRA, None), + (TopoRouter.RD_STATIC, None), + (TopoRouter.RD_SHARP, None)]) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_recursive_v6_nexthop_generation(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Testing v6 nexthop resolution") + + #assert False + router = tgen.gears["r1"] + + def _v6_converge_1_1_initial(): + output = json.loads( + router.vtysh_cmd("show ipv6 route 1::1 json")) + + expected = { + "1::1/128":[ + { + "prefix":"1::1/128", + "prefixLen":128, + "protocol":"static", + "vrfName":"default", + "selected":True, + "destSelected":True, + "distance":1, + "metric":0, + "installed":True, + "table":254, + "nexthops":[ + { + "fib":True, + "ip":"fc00::2", + "afi":"ipv6", + "interfaceName":"r1-eth0", + "active":True, + "weight":1 + } + ] + } + ] + } + + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_v6_converge_1_1_initial) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to install v6 1::1 route" + + router.vtysh_cmd("sharp install routes 2::2 nexthop 1::1 1") + router.vtysh_cmd("conf\nipv6 route 1::1/128 fc00::3\nno ipv6 route 1::1/128 fc00::2") + + def _v6_converge_1_1_post(): + output = json.loads( + router.vtysh_cmd("show ipv6 route 1::1 json")) + + expected = { + "1::1/128":[ + { + "prefix":"1::1/128", + "prefixLen":128, + "protocol":"static", + "vrfName":"default", + "selected":True, + "destSelected":True, + "distance":1, + "metric":0, + 
"installed":True, + "table":254, + "nexthops":[ + { + "fib":True, + "ip":"fc00::3", + "afi":"ipv6", + "interfaceName":"r1-eth0", + "active":True, + "weight":1 + } + ] + } + ] + } + + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_v6_converge_1_1_post) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to change v6 1::1 route" + + router.vtysh_cmd("sharp install routes 2::2 nexthop 1::1 1") + + def _v6_change_2_2_post(): + output = json.loads( + router.vtysh_cmd("show ipv6 route 2::2 json")) + + expected = { + "2::2/128":[ + { + "prefix":"2::2/128", + "prefixLen":128, + "protocol":"sharp", + "vrfName":"default", + "selected":True, + "destSelected":True, + "distance":150, + "metric":0, + "installed":True, + "table":254, + "nexthops":[ + { + "fib":True, + "ip":"fc00::3", + "afi":"ipv6", + "interfaceName":"r1-eth0", + "active":True, + "weight":1 + } + ] + } + ] + } + + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_v6_change_2_2_post) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see sharpd route correctly" + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tools/checkpatch.pl b/tools/checkpatch.pl index 2c773f7fbc..9c5eb323be 100755 --- a/tools/checkpatch.pl +++ b/tools/checkpatch.pl @@ -6285,13 +6285,14 @@ sub process { while ($line =~ /(?:^|")([X\t]*)(?:"|$)/g) { my $string = substr($rawline, $-[1], $+[1] - $-[1]); $string =~ s/%%/__/g; - # check for %L - if ($show_L && $string =~ /%[\*\d\.\$]*L([diouxX])/) { - WARN("PRINTF_L", - "\%L$1 is non-standard C, use %ll$1\n" . $herecurr); - $show_L = 0; - } - # check for %Z + # check for %L + # OK in FRR + # if ($show_L && $string =~ /%[\*\d\.\$]*L([diouxX])/) { + # WARN("PRINTF_L", + # "\%L$1 is non-standard C, use %ll$1\n" . $herecurr); + # $show_L = 0; + # } + # check for %Z if ($show_Z && $string =~ /%[\*\d\.\$]*Z([diouxX])/) { WARN("PRINTF_Z", "%Z$1 is non-standard C, use %z$1\n" . $herecurr); diff --git a/tools/frr-reload.py b/tools/frr-reload.py index dba50b3c53..f124cae713 100755 --- a/tools/frr-reload.py +++ b/tools/frr-reload.py @@ -237,6 +237,14 @@ def get_normalized_interface_vrf(line): return line +def get_normalized_ebgp_multihop_line(line): + obj = re.search(r"(.*)ebgp-multihop\s+255", line) + if obj: + line = obj.group(1) + "ebgp-multihop" + + return line + + # This dictionary contains a tree of all commands that we know start a # new multi-line context. All other commands are treated either as # commands inside a multi-line context or as single-line contexts. This @@ -382,6 +390,9 @@ class Config(object): if ":" in line: line = get_normalized_mac_ip_line(line) + if "ebgp-multihop" in line: + line = get_normalized_ebgp_multihop_line(line) + # vrf static routes can be added in two ways. 
The old way is: # # "ip route x.x.x.x/x y.y.y.y vrf <vrfname>" @@ -948,10 +959,14 @@ def bgp_remove_neighbor_cfg(lines_to_del, del_nbr_dict): lines_to_del_to_del = [] for ctx_keys, line in lines_to_del: + # lines_to_del has following + # (('router bgp 100',), 'neighbor swp1.10 interface peer-group dpeergrp_2'), + # (('router bgp 100',), 'neighbor swp1.10 advertisement-interval 1'), + # (('router bgp 100',), 'no neighbor swp1.10 capability dynamic'), if ( ctx_keys[0].startswith("router bgp") and line - and line.startswith("neighbor ") + and ((line.startswith("neighbor ") or line.startswith("no neighbor "))) ): if ctx_keys[0] in del_nbr_dict: for nbr in del_nbr_dict[ctx_keys[0]]: @@ -1753,12 +1768,13 @@ def compare_context_objects(newconf, running): delete_bgpd = True lines_to_del.append((running_ctx_keys, None)) - # We cannot do 'no interface' or 'no vrf' in FRR, and so deal with it - elif ( - running_ctx_keys[0].startswith("interface") - or running_ctx_keys[0].startswith("vrf") - or running_ctx_keys[0].startswith("router pim") - ): + elif running_ctx_keys[0].startswith("interface"): + lines_to_del.append((running_ctx_keys, None)) + + # We cannot do 'no vrf' in FRR, and so deal with it + elif running_ctx_keys[0].startswith("vrf") or running_ctx_keys[ + 0 + ].startswith("router pim"): for line in running_ctx.lines: lines_to_del.append((running_ctx_keys, line)) diff --git a/tools/gen_northbound_callbacks.c b/tools/gen_northbound_callbacks.c index 516743acab..87ba43eaa2 100644 --- a/tools/gen_northbound_callbacks.c +++ b/tools/gen_northbound_callbacks.c @@ -15,12 +15,13 @@ #include "yang.h" #include "northbound.h" -static bool static_cbs; +static bool f_static_cbs; +static bool f_new_cbs; static void __attribute__((noreturn)) usage(int status) { extern const char *__progname; - fprintf(stderr, "usage: %s [-h] [-s] [-p path]* MODULE\n", __progname); + fprintf(stderr, "usage: %s [-h] [-n] [-s] [-p path]* MODULE\n", __progname); exit(status); } @@ -111,6 +112,14 @@ static struct nb_callback_info nb_config_write = { .arguments = "struct vty *vty, const struct lyd_node *dnode, bool show_defaults", }; +static struct nb_callback_info nb_oper_get = { + .operation = NB_CB_GET_ELEM, + .return_type = "enum nb_error ", + .return_value = "NB_OK", + .arguments = + "const struct nb_node *nb_node, const void *parent_list_entry, struct lyd_node *parent", +}; + static void replace_hyphens_by_underscores(char *str) { char *p; @@ -120,6 +129,14 @@ static void replace_hyphens_by_underscores(char *str) *p++ = '_'; } +static const char *__operation_name(enum nb_cb_operation operation) +{ + if (f_new_cbs && operation == NB_CB_GET_ELEM) + return "get"; + else + return nb_cb_operation_name(operation); +} + static void generate_callback_name(const struct lysc_node *snode, enum nb_cb_operation operation, char *buffer, size_t size) @@ -143,7 +160,7 @@ static void generate_callback_name(const struct lysc_node *snode, strlcat(buffer, snode->name, size); strlcat(buffer, "_", size); } - strlcat(buffer, nb_cb_operation_name(operation), size); + strlcat(buffer, __operation_name(operation), size); list_delete(&snodes); replace_hyphens_by_underscores(buffer); @@ -208,17 +225,23 @@ static int generate_prototypes(const struct lysc_node *snode, void *arg) return YANG_ITER_CONTINUE; } - for (struct nb_callback_info *cb = &nb_callbacks[0]; - cb->operation != -1; cb++) { + for (struct nb_callback_info *cb = &nb_callbacks[0]; cb->operation != -1; cb++) { char cb_name[BUFSIZ]; if (cb->optional || !nb_cb_operation_is_valid(cb->operation, 
snode)) continue; + if (f_new_cbs && cb->operation == NB_CB_GET_NEXT && snode->nodetype == LYS_LEAFLIST) + continue; + generate_callback_name(snode, cb->operation, cb_name, sizeof(cb_name)); - generate_prototype(cb, cb_name); + + if (cb->operation == NB_CB_GET_ELEM && f_new_cbs) + generate_prototype(&nb_oper_get, cb_name); + else + generate_prototype(cb, cb_name); if (cb->need_config_write && need_config_write) { generate_config_write_cb_name(snode, cb_name, @@ -236,8 +259,8 @@ static int generate_prototypes(const struct lysc_node *snode, void *arg) static void generate_callback(const struct nb_callback_info *ncinfo, const char *cb_name) { - printf("%s%s%s(%s)\n{\n", static_cbs ? "static " : "", - ncinfo->return_type, cb_name, ncinfo->arguments); + printf("%s%s%s(%s)\n{\n", f_static_cbs ? "static " : "", ncinfo->return_type, cb_name, + ncinfo->arguments); switch (ncinfo->operation) { case NB_CB_CREATE: @@ -266,8 +289,8 @@ static void generate_callback(const struct nb_callback_info *ncinfo, static void generate_config_write_callback(const struct nb_callback_info *ncinfo, const char *cb_name) { - printf("%s%s%s(%s)\n{\n", static_cbs ? "static " : "", - ncinfo->return_type, cb_name, ncinfo->arguments); + printf("%s%s%s(%s)\n{\n", f_static_cbs ? "static " : "", ncinfo->return_type, cb_name, + ncinfo->arguments); /* Add a comment, since these callbacks may not all be needed. */ printf("\t/* TODO: this cli callback is optional; the cli output may not need to be done at each node. */\n"); @@ -313,9 +336,16 @@ static int generate_callbacks(const struct lysc_node *snode, void *arg) first = false; } + if (f_new_cbs && cb->operation == NB_CB_GET_NEXT && snode->nodetype == LYS_LEAFLIST) + continue; + generate_callback_name(snode, cb->operation, cb_name, sizeof(cb_name)); - generate_callback(cb, cb_name); + + if (cb->operation == NB_CB_GET_ELEM && f_new_cbs) + generate_callback(&nb_oper_get, cb_name); + else + generate_callback(cb, cb_name); if (cb->need_config_write && need_config_write) { generate_config_write_cb_name(snode, cb_name, @@ -371,12 +401,13 @@ static int generate_nb_nodes(const struct lysc_node *snode, void *arg) printf("\t\t\t.cbs = {\n"); first = false; } + if (f_new_cbs && cb->operation == NB_CB_GET_NEXT && + snode->nodetype == LYS_LEAFLIST) + continue; generate_callback_name(snode, cb->operation, cb_name, sizeof(cb_name)); - printf("\t\t\t\t.%s = %s,\n", - nb_cb_operation_name(cb->operation), - cb_name); + printf("\t\t\t\t.%s = %s,\n", __operation_name(cb->operation), cb_name); } else if (cb->need_config_write && need_config_write) { if (first) { yang_snode_get_path(snode, @@ -417,11 +448,14 @@ int main(int argc, char *argv[]) int opt; bool config_pass; - while ((opt = getopt(argc, argv, "hp:s")) != -1) { + while ((opt = getopt(argc, argv, "hnp:s")) != -1) { switch (opt) { case 'h': usage(EXIT_SUCCESS); /* NOTREACHED */ + case 'n': + f_new_cbs = true; + break; case 'p': if (stat(optarg, &st) == -1) { fprintf(stderr, @@ -438,7 +472,7 @@ int main(int argc, char *argv[]) *darr_append(search_paths) = darr_strdup(optarg); break; case 's': - static_cbs = true; + f_static_cbs = true; break; default: usage(EXIT_FAILURE); @@ -477,7 +511,7 @@ int main(int argc, char *argv[]) printf("// SPDX-" "License-Identifier: GPL-2.0-or-later\n\n"); /* Generate callback prototypes. 
*/ - if (!static_cbs) { + if (!f_static_cbs) { printf("/* prototypes */\n"); yang_snodes_iterate(module->info, generate_prototypes, 0, NULL); printf("\n"); diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c index c2eb9159c2..0559e89f92 100644 --- a/vtysh/vtysh.c +++ b/vtysh/vtysh.c @@ -1312,6 +1312,13 @@ static struct cmd_node srv6_node = { .prompt = "%s(config-srv6)# ", }; +static struct cmd_node srv6_sids_node = { + .name = "srv6-sids", + .node = SRV6_SIDS_NODE, + .parent_node = SRV6_NODE, + .prompt = "%s(config-srv6-sids)# ", +}; + static struct cmd_node srv6_locs_node = { .name = "srv6-locators", .node = SRV6_LOCS_NODE, @@ -1685,7 +1692,7 @@ DEFUNSH(VTYSH_REALLYALL, vtysh_end_all, vtysh_end_all_cmd, "end", return vtysh_end(); } -DEFUNSH(VTYSH_ZEBRA, srv6, srv6_cmd, +DEFUNSH(VTYSH_ZEBRA | VTYSH_MGMTD, srv6, srv6_cmd, "srv6", "Segment-Routing SRv6 configuration\n") { @@ -1693,6 +1700,14 @@ DEFUNSH(VTYSH_ZEBRA, srv6, srv6_cmd, return CMD_SUCCESS; } +DEFUNSH(VTYSH_MGMTD, srv6_sids, srv6_sids_cmd, + "static-sids", + "Segment-Routing SRv6 SIDs configuration\n") +{ + vty->node = SRV6_SIDS_NODE; + return CMD_SUCCESS; +} + DEFUNSH(VTYSH_ZEBRA, srv6_locators, srv6_locators_cmd, "locators", "Segment-Routing SRv6 locators configuration\n") @@ -2216,7 +2231,7 @@ DEFUNSH(VTYSH_FABRICD, router_openfabric, router_openfabric_cmd, "router openfab } #endif /* HAVE_FABRICD */ -DEFUNSH(VTYSH_SR, segment_routing, segment_routing_cmd, +DEFUNSH(VTYSH_SR | VTYSH_MGMTD, segment_routing, segment_routing_cmd, "segment-routing", "Configure segment routing\n") { @@ -2608,7 +2623,7 @@ DEFUNSH(VTYSH_VRF, exit_vrf_config, exit_vrf_config_cmd, "exit-vrf", return CMD_SUCCESS; } -DEFUNSH(VTYSH_ZEBRA, exit_srv6_config, exit_srv6_config_cmd, "exit", +DEFUNSH(VTYSH_ZEBRA | VTYSH_MGMTD, exit_srv6_config, exit_srv6_config_cmd, "exit", "Exit from SRv6 configuration mode\n") { if (vty->node == SRV6_NODE) @@ -2624,6 +2639,14 @@ DEFUNSH(VTYSH_ZEBRA, exit_srv6_locs_config, exit_srv6_locs_config_cmd, "exit", return CMD_SUCCESS; } +DEFUNSH(VTYSH_MGMTD, exit_srv6_sids_config, exit_srv6_sids_config_cmd, "exit", + "Exit from SRv6-SIDs configuration mode\n") +{ + if (vty->node == SRV6_SIDS_NODE) + vty->node = SRV6_NODE; + return CMD_SUCCESS; +} + DEFUNSH(VTYSH_ZEBRA, exit_srv6_loc_config, exit_srv6_loc_config_cmd, "exit", "Exit from SRv6-locators configuration mode\n") { @@ -2879,13 +2902,13 @@ DEFUNSH(VTYSH_KEYS, vtysh_quit_keys, vtysh_quit_keys_cmd, "quit", return vtysh_exit_keys(self, vty, argc, argv); } -DEFUNSH(VTYSH_SR, vtysh_exit_sr, vtysh_exit_sr_cmd, "exit", +DEFUNSH(VTYSH_SR | VTYSH_MGMTD, vtysh_exit_sr, vtysh_exit_sr_cmd, "exit", "Exit current mode and down to previous mode\n") { return vtysh_exit(vty); } -DEFUNSH(VTYSH_SR, vtysh_quit_sr, vtysh_quit_sr_cmd, "quit", +DEFUNSH(VTYSH_SR | VTYSH_MGMTD, vtysh_quit_sr, vtysh_quit_sr_cmd, "quit", "Exit current mode and down to previous mode\n") { return vtysh_exit(vty); @@ -5072,6 +5095,7 @@ void vtysh_init_vty(void) install_node(&rmap_node); install_node(&vty_node); install_node(&srv6_node); + install_node(&srv6_sids_node); install_node(&srv6_locs_node); install_node(&srv6_loc_node); install_node(&srv6_encap_node); @@ -5518,6 +5542,10 @@ void vtysh_init_vty(void) install_element(SRV6_NODE, &exit_srv6_config_cmd); install_element(SRV6_NODE, &vtysh_end_all_cmd); install_element(SRV6_NODE, &srv6_encap_cmd); + install_element(SRV6_NODE, &srv6_sids_cmd); + + install_element(SRV6_SIDS_NODE, &exit_srv6_sids_config_cmd); + install_element(SRV6_SIDS_NODE, &vtysh_end_all_cmd); 
install_element(SRV6_LOCS_NODE, &srv6_locator_cmd); install_element(SRV6_LOCS_NODE, &exit_srv6_locs_config_cmd); diff --git a/yang/frr-backend.yang b/yang/frr-backend.yang new file mode 100644 index 0000000000..7149cbb991 --- /dev/null +++ b/yang/frr-backend.yang @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: BSD-2-Clause +module frr-backend { + yang-version 1.1; + namespace "http://frrouting.org/yang/oper"; + prefix frr-backend; + + organization + "FRRouting"; + contact + "FRR Users List: <mailto:frog@lists.frrouting.org> + FRR Development List: <mailto:dev@lists.frrouting.org>"; + description + "This module defines a model for FRR backend management. + + Copyright (c) 2024, LabN Consulting, L.L.C. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."; + + revision 2024-12-29 { + description "Initial revision"; + reference "FRR source code"; + } + + container clients { + config false; + description "The backend clients"; + + list client { + key name; + description "A backend client"; + + leaf name { + type string; + description "Name of the backend client"; + } + + container state { + description "FRR backend operational state"; + + leaf candidate-config-version { + type uint64; + description "Local candidate config version."; + } + leaf running-config-version { + type uint64; + description "Local running config version."; + } + leaf edit-count { + type uint64; + description "Number of config edits handled."; + } + leaf avg-edit-time { + type uint64; + description "Average edit time in microseconds."; + } + leaf prep-count { + type uint64; + description "Number of config preps handled."; + } + leaf avg-prep-time { + type uint64; + description "Average prep time in microseconds."; + } + leaf apply-count { + type uint64; + description "Number of config applies handled."; + } + leaf avg-apply-time { + type uint64; + description "Average apply time in microseconds."; + } + leaf-list notify-selectors { + type string; + description + "List of paths identifying which state to send change + notifications for."; + } + } + } + } +} diff --git a/yang/frr-bgp-route-map.yang b/yang/frr-bgp-route-map.yang index 5f701d514c..efb0b2fa08 100644 --- a/yang/frr-bgp-route-map.yang +++ b/yang/frr-bgp-route-map.yang @@ -148,6 +148,12 @@ module frr-bgp-route-map { "Match BGP community list"; } + identity 
match-community-limit { + base frr-route-map:rmap-match-type; + description + "Match BGP community limit count"; + } + identity match-large-community { base frr-route-map:rmap-match-type; description @@ -802,6 +808,17 @@ identity set-extcommunity-color { } } + case community-limit { + when "derived-from-or-self(../frr-route-map:condition, 'frr-bgp-route-map:match-community-limit')"; + description + "Match BGP updates when the list of communities count is less than the configured limit."; + leaf community-limit { + type uint16 { + range "1..1024"; + } + } + } + case comm-list-name { when "derived-from-or-self(../frr-route-map:condition, 'frr-bgp-route-map:match-community') or " + "derived-from-or-self(../frr-route-map:condition, 'frr-bgp-route-map:match-large-community') or " diff --git a/yang/frr-pim.yang b/yang/frr-pim.yang index 8dadf4fd7c..6b6870f666 100644 --- a/yang/frr-pim.yang +++ b/yang/frr-pim.yang @@ -202,11 +202,29 @@ module frr-pim { description "A grouping defining per address family pim global attributes"; - leaf mcast-rpf-lookup { - type mcast-rpf-lookup-mode; - default "none"; + list mcast-rpf-lookup { + key "group-list source-list"; description - "Multicast RPF lookup behavior."; + "RPF lookup modes."; + + leaf group-list { + type plist-ref; + description + "Multicast group prefix list."; + } + + leaf source-list { + type plist-ref; + description + "Unicast source address prefix list."; + } + + leaf mode { + type mcast-rpf-lookup-mode; + default "none"; + description + "Multicast RPF lookup behavior."; + } } leaf ecmp { diff --git a/yang/frr-staticd.yang b/yang/frr-staticd.yang index 1e6c54c006..8d0e58c0a5 100644 --- a/yang/frr-staticd.yang +++ b/yang/frr-staticd.yang @@ -20,6 +20,10 @@ module frr-staticd { prefix frr-bfdd; } + import frr-vrf { + prefix frr-vrf; + } + organization "FRRouting"; contact @@ -92,6 +96,64 @@ module frr-staticd { } } + typedef srv6-behavior-codepoint { + description + "SRv6 Endpoint Behaviors Codepoints as per + https://www.iana.org/assignments/segment-routing/segment-routing.xhtml."; + type enumeration { + enum End { + value 1; + description + "This enum indicates End endpoint behavior."; + } + enum End.X { + value 5; + description + "This enum indicates End.X endpoint behavior."; + } + enum End.DT6 { + value 18; + description + "This enum indicates End.DT6 endpoint behavior."; + } + enum End.DT4 { + value 19; + description + "This enum indicates End.DT4 endpoint behavior."; + } + enum End.DT46 { + value 20; + description + "This enum indicates End.DT46 endpoint behavior."; + } + enum uN { + value 43; + description + "This enum indicates End with NEXT-CSID endpoint behavior."; + } + enum uA { + value 52; + description + "This enum indicates End.X with NEXT-CSID endpoint behavior."; + } + enum uDT6 { + value 62; + description + "This enum indicates End.DT6 with NEXT-CSID endpoint behavior."; + } + enum uDT4 { + value 63; + description + "This enum indicates End.DT4 with NEXT-CSID endpoint behavior."; + } + enum uDT46 { + value 64; + description + "This enum indicates End.DT46 with NEXT-CSID endpoint behavior."; + } + } + } + augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol" { container staticd { when "../frr-rt:type = 'frr-staticd:staticd'" { @@ -103,7 +165,7 @@ module frr-staticd { "Support for a 'staticd' pseudo-protocol instance consists of a list of routes."; list route-list { - key "prefix afi-safi"; + key "prefix src-prefix afi-safi"; description "List of staticd IP routes."; leaf prefix { @@ -111,6 
+173,11 @@ module frr-staticd { description "IP prefix."; } + leaf src-prefix { + type inet:ipv6-prefix; + description + "IPv6 source prefix for dst-src routes"; + } leaf afi-safi { type identityref { base frr-rt:afi-safi-type; @@ -118,6 +185,12 @@ module frr-staticd { description "AFI-SAFI type."; } + /* note dst-src routes are semantically invalid in MRIB */ + must "afi-safi = 'frr-rt:ipv6-unicast' + or afi-safi = 'frr-rt:ipv6-labeled-unicast' + or afi-safi = 'frr-rt:l3vpn-ipv6-unicast' + or src-prefix = '::/0' + "; uses staticd-prefix-attributes { augment "path-list/frr-nexthops/nexthop" { @@ -132,18 +205,45 @@ module frr-staticd { } } } + } - list src-list { - key "src-prefix"; - leaf src-prefix { - type inet:ipv6-prefix; + container segment-routing { + description + "Segment Routing configuration."; + container srv6 { + description + "Segment Routing over IPv6 (SRv6) configuration."; + container static-sids { description - "IPv6 source prefix"; + "This container lists the SRv6 Static SIDs instantiated on the local node."; + list sid { + description + "List of SRv6 Static SIDs."; + key "sid"; + leaf sid { + type inet:ipv6-prefix; + description + "Value of the SRv6 SID."; + } + leaf behavior { + type srv6-behavior-codepoint; + description + "Behavior bound to the SRv6 SID."; + } + leaf locator-name { + type string; + description + "SRv6 locator name."; + } + leaf vrf-name { + type frr-vrf:vrf-ref; + description + "The VRF name."; + } + } } - - uses staticd-prefix-attributes; } } } } -} +}
\ No newline at end of file diff --git a/yang/frr-test-module.yang b/yang/frr-test-module.yang index dcf204a956..773a959553 100644 --- a/yang/frr-test-module.yang +++ b/yang/frr-test-module.yang @@ -7,13 +7,14 @@ module frr-test-module { import ietf-inet-types { prefix inet; } - import ietf-yang-types { - prefix yang; - } import frr-interface { prefix frr-interface; } + organization "placeholder for lint"; + + contact "placeholder for lint"; + description "FRRouting internal testing module. @@ -45,38 +46,56 @@ module frr-test-module { revision 2018-11-26 { description "Initial revision."; + reference "placeholder for lint"; } container frr-test-module { config false; + description "a container for test module data"; container vrfs { + description "a container of vrfs"; list vrf { key "name"; + description "a keyed vrf list object"; leaf name { type string; + description "name of vrf"; } container interfaces { + description "container of leaf-list interfaces"; leaf-list interface { type frr-interface:interface-ref; + description "leaf list interface object"; + } + leaf-list interface-new { + type frr-interface:interface-ref; + description "second leaf list interface object"; } } container routes { + description "container of key-less route objects"; list route { + description "a key-less route object"; leaf prefix { type inet:ipv4-prefix; + description "prefix of the route object"; } leaf next-hop { type inet:ipv4-address; + description "nexthop of the route object"; } leaf interface { type frr-interface:interface-ref; + description "interface of the route object"; } leaf metric { type uint8; + description "metric of the route object"; } leaf active { type empty; + description "active status of the route object"; } } } @@ -84,16 +103,19 @@ module frr-test-module { input { leaf data { type string; + description "data input to ping action."; } } output { leaf vrf { type string; + description "vrf returned from ping action."; } // can't use the same name in input and output // because of a bug in libyang < 2.1.148 leaf data-out { type string; + description "data return from ping action."; } } } diff --git a/yang/subdir.am b/yang/subdir.am index 786bd0bca6..9d4bc8e78d 100644 --- a/yang/subdir.am +++ b/yang/subdir.am @@ -20,6 +20,7 @@ EXTRA_DIST += yang/embedmodel.py # without problems, as seen in libfrr. dist_yangmodels_DATA += yang/frr-affinity-map.yang +dist_yangmodels_DATA += yang/frr-backend.yang dist_yangmodels_DATA += yang/frr-filter.yang dist_yangmodels_DATA += yang/frr-module-translator.yang dist_yangmodels_DATA += yang/frr-nexthop.yang diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c index 3ec1c9d657..b8dbabb60d 100644 --- a/zebra/dplane_fpm_nl.c +++ b/zebra/dplane_fpm_nl.c @@ -587,6 +587,7 @@ static void fpm_read(struct event *t) struct zebra_dplane_ctx *ctx; size_t available_bytes; size_t hdr_available_bytes; + int ival; /* Let's ignore the input at the moment. */ rv = stream_read_try(fnc->ibuf, fnc->socket, @@ -715,17 +716,28 @@ static void fpm_read(struct event *t) break; } + /* Parse the route data into a dplane ctx, then + * enqueue it to zebra for processing. 
+ */ ctx = dplane_ctx_alloc(); dplane_ctx_route_init(ctx, DPLANE_OP_ROUTE_NOTIFY, NULL, NULL); - if (netlink_route_change_read_unicast_internal( - hdr, 0, false, ctx) != 1) { - dplane_ctx_fini(&ctx); - stream_pulldown(fnc->ibuf); + + if (netlink_route_notify_read_ctx(hdr, 0, ctx) >= 0) { + /* In the FPM encoding, the vrfid is present */ + ival = dplane_ctx_get_table(ctx); + dplane_ctx_set_vrf(ctx, ival); + dplane_ctx_set_table(ctx, + ZEBRA_ROUTE_TABLE_UNKNOWN); + + dplane_provider_enqueue_to_zebra(ctx); + } else { /* * Let's continue to read other messages * Even if we ignore this one. */ + dplane_ctx_fini(&ctx); + stream_pulldown(fnc->ibuf); } break; default: diff --git a/zebra/interface.c b/zebra/interface.c index 1c86a6a5c7..e49e8eac5e 100644 --- a/zebra/interface.c +++ b/zebra/interface.c @@ -81,7 +81,7 @@ static void if_zebra_speed_update(struct event *thread) if (new_speed != ifp->speed) { zlog_info("%s: %s old speed: %u new speed: %u", __func__, ifp->name, ifp->speed, new_speed); - ifp->speed = new_speed; + if_update_state_speed(ifp, new_speed); if_add_update(ifp); changed = true; } @@ -1563,17 +1563,20 @@ static inline void zebra_if_set_ziftype(struct interface *ifp, static void interface_update_hw_addr(struct zebra_dplane_ctx *ctx, struct interface *ifp) { - int i; + uint8_t hw_addr[INTERFACE_HWADDR_MAX]; + uint i, hw_addr_len; - ifp->hw_addr_len = dplane_ctx_get_ifp_hw_addr_len(ctx); - memcpy(ifp->hw_addr, dplane_ctx_get_ifp_hw_addr(ctx), ifp->hw_addr_len); + hw_addr_len = dplane_ctx_get_ifp_hw_addr_len(ctx); + memcpy(hw_addr, dplane_ctx_get_ifp_hw_addr(ctx), hw_addr_len); - for (i = 0; i < ifp->hw_addr_len; i++) - if (ifp->hw_addr[i] != 0) + for (i = 0; i < hw_addr_len; i++) + if (hw_addr[i] != 0) break; - if (i == ifp->hw_addr_len) - ifp->hw_addr_len = 0; + if (i == hw_addr_len) + hw_addr_len = 0; + + if_update_state_hw_addr(ifp, hw_addr, hw_addr_len); } static void interface_update_l2info(struct zebra_dplane_ctx *ctx, @@ -1984,9 +1987,10 @@ static void zebra_if_dplane_ifp_handling(struct zebra_dplane_ctx *ctx) /* Update interface information. */ set_ifindex(ifp, ifindex, zns); ifp->flags = flags; - ifp->mtu6 = ifp->mtu = mtu; - ifp->metric = 0; - ifp->speed = kernel_get_speed(ifp, NULL); + if_update_state_mtu(ifp, mtu); + if_update_state_mtu6(ifp, mtu); + if_update_state_metric(ifp, 0); + if_update_state_speed(ifp, kernel_get_speed(ifp, NULL)); ifp->ptm_status = ZEBRA_PTM_STATUS_UNKNOWN; ifp->txqlen = dplane_ctx_get_intf_txqlen(ctx); @@ -2036,6 +2040,7 @@ static void zebra_if_dplane_ifp_handling(struct zebra_dplane_ctx *ctx) IS_ZEBRA_IF_BRIDGE_VLAN_AWARE( zif)); } + // if_update_state(ifp); } else if (ifp->vrf->vrf_id != vrf_id) { /* VRF change for an interface. 
*/ if (IS_ZEBRA_DEBUG_KERNEL) @@ -2058,8 +2063,9 @@ static void zebra_if_dplane_ifp_handling(struct zebra_dplane_ctx *ctx) (unsigned long long)flags); set_ifindex(ifp, ifindex, zns); - ifp->mtu6 = ifp->mtu = mtu; - ifp->metric = 0; + if_update_state_mtu(ifp, mtu); + if_update_state_mtu6(ifp, mtu); + if_update_state_metric(ifp, 0); ifp->txqlen = dplane_ctx_get_intf_txqlen(ctx); /* diff --git a/zebra/main.c b/zebra/main.c index 4546d14770..fd242e762a 100644 --- a/zebra/main.c +++ b/zebra/main.c @@ -287,6 +287,7 @@ struct frr_signal_t zebra_signals[] = { /* clang-format off */ static const struct frr_yang_module_info *const zebra_yang_modules[] = { + &frr_backend_info, &frr_filter_info, &frr_interface_info, &frr_route_map_info, @@ -356,7 +357,8 @@ int main(int argc, char **argv) zserv_path = NULL; - vrf_configure_backend(VRF_BACKEND_VRF_LITE); + if_notify_oper_changes = true; + vrf_notify_oper_changes = true; frr_preinit(&zebra_di, argc, argv); @@ -375,7 +377,7 @@ int main(int argc, char **argv) " --v6-with-v4-nexthops Underlying dataplane supports v6 routes with v4 nexthops\n" #ifdef HAVE_NETLINK " -s, --nl-bufsize Set netlink receive buffer size\n" - " -n, --vrfwnetns Use NetNS as VRF backend\n" + " -n, --vrfwnetns Use NetNS as VRF backend (deprecated, use -w)\n" " --v6-rr-semantics Use v6 RR semantics\n" #else " -s, Set kernel socket receive buffer size\n" @@ -436,6 +438,8 @@ int main(int argc, char **argv) break; #ifdef HAVE_NETLINK case 'n': + fprintf(stderr, + "The -n option is deprecated, please use global -w option instead.\n"); vrf_configure_backend(VRF_BACKEND_NETNS); break; case OPTION_V6_RR_SEMANTICS: diff --git a/zebra/redistribute.c b/zebra/redistribute.c index 66dc5b4b5f..9bf7e2cbb5 100644 --- a/zebra/redistribute.c +++ b/zebra/redistribute.c @@ -82,8 +82,8 @@ static void zebra_redistribute_default(struct zserv *client, vrf_id_t vrf_id) RNODE_FOREACH_RE (rn, newre) { if (CHECK_FLAG(newre->flags, ZEBRA_FLAG_SELECTED)) - zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD, - client, rn, newre, false); + zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD, client, rn, + newre, NULL); } route_unlock_node(rn); @@ -91,6 +91,24 @@ static void zebra_redistribute_default(struct zserv *client, vrf_id_t vrf_id) } /* Redistribute routes. 
*/ +static void redistribute_table_direct(struct zserv *client, int type, const struct route_node *rn, + const struct route_entry *re) +{ + struct redist_table_direct *table; + struct redist_proto *red; + struct listnode *node; + afi_t afi = family2afi(rn->p.family); + + red = &client->mi_redist[afi][ZEBRA_ROUTE_TABLE_DIRECT]; + + for (ALL_LIST_ELEMENTS_RO(red->instances, node, table)) { + if (table->table_id != (int)re->table) + continue; + + zsend_redistribute_route(type, client, rn, re, &table->vrf_id); + } +} + static void zebra_redistribute(struct zserv *client, int type, unsigned short instance, struct zebra_vrf *zvrf, int afi) @@ -102,13 +120,9 @@ static void zebra_redistribute(struct zserv *client, int type, vrf_id_t vrf_id = zvrf_id(zvrf); if (type == ZEBRA_ROUTE_TABLE_DIRECT) { - if (vrf_id == VRF_DEFAULT) { - table = zebra_router_find_table(zvrf, instance, afi, - SAFI_UNICAST); - type = ZEBRA_ROUTE_ALL; - is_table_direct = true; - } else - return; + table = zebra_router_find_table(zvrf, instance, afi, SAFI_UNICAST); + type = ZEBRA_ROUTE_ALL; + is_table_direct = true; } else table = zebra_vrf_table(afi, SAFI_UNICAST, vrf_id); @@ -140,15 +154,20 @@ static void zebra_redistribute(struct zserv *client, int type, if (!zebra_check_addr(&rn->p)) continue; - zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD, - client, rn, newre, is_table_direct); + if (is_table_direct) + redistribute_table_direct(client, ZEBRA_REDISTRIBUTE_ROUTE_ADD, rn, + newre); + else + zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD, client, rn, + newre, NULL); } } /* - * Function to return a valid table id value if table-direct is used - * return 0 otherwise - * This function can be called only if zebra_redistribute_check returns TRUE + * Checks if the route entry can be used as table-direct or not. + * `table-direct` routes always belong to `VRF_DEFAULT` and has an table + * ID different than the VRF it belongs (example main VRF table is 254, + * so in order to be `table-direct` the route's table ID must be != 254). */ static bool zebra_redistribute_is_table_direct(const struct route_entry *re) { @@ -177,15 +196,14 @@ static bool zebra_redistribute_check(const struct route_node *rn, afi = family2afi(rn->p.family); zvrf = zebra_vrf_lookup_by_id(re->vrf_id); - if (re->vrf_id == VRF_DEFAULT && zvrf->table_id != re->table) { + if (zvrf->table_id != re->table) { + /* + * Routes with table ID different from VRFs can be used as + * `table-direct` if enabled. + */ if (re->table && - redist_check_instance(&client->mi_redist - [afi][ZEBRA_ROUTE_TABLE_DIRECT], - re->table)) { - /* table-direct redistribution only for route entries which - * are on the default vrf, and that have table id different - * from the default table. 
- */ + redist_table_direct_has_id(&client->mi_redist[afi][ZEBRA_ROUTE_TABLE_DIRECT], + re->table)) { return true; } return false; @@ -227,7 +245,6 @@ void redistribute_update(const struct route_node *rn, { struct listnode *node, *nnode; struct zserv *client; - bool is_table_direct; if (IS_ZEBRA_DEBUG_RIB) zlog_debug( @@ -242,7 +259,6 @@ void redistribute_update(const struct route_node *rn, return; } - for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { if (zebra_redistribute_check(rn, re, client)) { if (IS_ZEBRA_DEBUG_RIB) { @@ -253,15 +269,19 @@ void redistribute_update(const struct route_node *rn, re->vrf_id, re->table, re->type, re->distance, re->metric); } - is_table_direct = zebra_redistribute_is_table_direct(re); - zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD, - client, rn, re, - is_table_direct); + if (zebra_redistribute_is_table_direct(re)) + redistribute_table_direct(client, ZEBRA_REDISTRIBUTE_ROUTE_ADD, rn, + re); + else + zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD, client, rn, + re, NULL); } else if (zebra_redistribute_check(rn, prev_re, client)) { - is_table_direct = zebra_redistribute_is_table_direct(prev_re); - zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_DEL, - client, rn, prev_re, - is_table_direct); + if (zebra_redistribute_is_table_direct(prev_re)) + redistribute_table_direct(client, ZEBRA_REDISTRIBUTE_ROUTE_DEL, rn, + prev_re); + else + zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_DEL, client, rn, + prev_re, NULL); } } } @@ -281,7 +301,6 @@ void redistribute_delete(const struct route_node *rn, struct listnode *node, *nnode; struct zserv *client; vrf_id_t vrfid; - bool is_table_direct; if (old_re) vrfid = old_re->vrf_id; @@ -344,10 +363,12 @@ void redistribute_delete(const struct route_node *rn, * happy. */ assert(old_re); - is_table_direct = zebra_redistribute_is_table_direct(old_re); - zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_DEL, - client, rn, old_re, - is_table_direct); + if (zebra_redistribute_is_table_direct(old_re)) + redistribute_table_direct(client, ZEBRA_REDISTRIBUTE_ROUTE_DEL, rn, + old_re); + else + zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_DEL, client, rn, + old_re, NULL); } } } @@ -383,8 +404,16 @@ void zebra_redistribute_add(ZAPI_HANDLER_ARGS) } if (instance) { - if (!redist_check_instance(&client->mi_redist[afi][type], - instance)) { + if (type == ZEBRA_ROUTE_TABLE_DIRECT) { + struct redist_table_direct table = { + .vrf_id = zvrf->vrf->vrf_id, + .table_id = instance, + }; + if (!redist_lookup_table_direct(&client->mi_redist[afi][type], &table)) { + redist_add_table_direct(&client->mi_redist[afi][type], &table); + zebra_redistribute(client, type, instance, zvrf, afi); + } + } else if (!redist_check_instance(&client->mi_redist[afi][type], instance)) { redist_add_instance(&client->mi_redist[afi][type], instance); zebra_redistribute(client, type, instance, zvrf, afi); @@ -443,7 +472,13 @@ void zebra_redistribute_delete(ZAPI_HANDLER_ARGS) * themselves should keep track of the received routes from zebra and * withdraw them when necessary. 
*/ - if (instance) + if (type == ZEBRA_ROUTE_TABLE_DIRECT) { + struct redist_table_direct table = { + .vrf_id = zvrf->vrf->vrf_id, + .table_id = instance, + }; + redist_del_table_direct(&client->mi_redist[afi][type], &table); + } else if (instance) redist_del_instance(&client->mi_redist[afi][type], instance); else vrf_bitmap_unset(&client->redist[afi][type], zvrf_id(zvrf)); diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c index 492fe52889..d696b19859 100644 --- a/zebra/rt_netlink.c +++ b/zebra/rt_netlink.c @@ -723,44 +723,52 @@ static uint16_t parse_multipath_nexthops_unicast(ns_id_t ns_id, struct nexthop_g return nhop_num; } -/* Looking up routing table by netlink interface. */ -int netlink_route_change_read_unicast_internal(struct nlmsghdr *h, - ns_id_t ns_id, int startup, - struct zebra_dplane_ctx *ctx) +/* + * Parse netlink route message and capture info into a dplane ctx. + * Returns <0 if the message is to be skipped (might be an error) + */ +static int netlink_route_read_unicast_ctx(struct nlmsghdr *h, ns_id_t ns_id, + struct rtattr **tb_in, + struct zebra_dplane_ctx *ctx) { + int ret = 0; int len; struct rtmsg *rtm; - struct rtattr *tb[RTA_MAX + 1]; + struct rtattr **tb, *tb_array[RTA_MAX + 1]; uint32_t flags = 0; struct prefix p; - struct prefix_ipv6 src_p = {}; - vrf_id_t vrf_id; + struct prefix src_p = {}; bool selfroute; - - char anyaddr[16] = {0}; - + char anyaddr[16] = {}; int proto = ZEBRA_ROUTE_KERNEL; int index = 0; - int table; + int tableid; int metric = 0; uint32_t mtu = 0; uint8_t distance = 0; route_tag_t tag = 0; - uint32_t nhe_id = 0; - + uint32_t nhg_id = 0; void *dest = NULL; void *gate = NULL; + int gate_len; void *prefsrc = NULL; /* IPv4 preferred source host address */ + int prefsrc_len; void *src = NULL; /* IPv6 srcdest source prefix */ enum blackhole_type bh_type = BLACKHOLE_UNSPEC; + afi_t afi = AFI_IP; + struct ipaddr addr = {}; - frrtrace(3, frr_zebra, netlink_route_change_read_unicast, h, ns_id, - startup); + len = h->nlmsg_len - NLMSG_LENGTH(sizeof(struct rtmsg)); + if (len < 0) { + zlog_err( + "%s: Netlink route message received with invalid size %d %zu", + __func__, h->nlmsg_len, + (size_t)NLMSG_LENGTH(sizeof(struct rtmsg))); + return -1; + } rtm = NLMSG_DATA(h); - if (startup && h->nlmsg_type != RTM_NEWROUTE) - return 0; switch (rtm->rtm_type) { case RTN_UNICAST: break; @@ -778,54 +786,42 @@ int netlink_route_change_read_unicast_internal(struct nlmsghdr *h, zlog_debug("Route rtm_type: %s(%d) intentionally ignoring", nl_rttype_to_str(rtm->rtm_type), rtm->rtm_type); - return 0; + ret = -1; + goto done; } - len = h->nlmsg_len - NLMSG_LENGTH(sizeof(struct rtmsg)); - if (len < 0) { - zlog_err( - "%s: Message received from netlink is of a broken size %d %zu", - __func__, h->nlmsg_len, - (size_t)NLMSG_LENGTH(sizeof(struct rtmsg))); - return -1; + if ((rtm->rtm_flags & RTM_F_CLONED) || + (rtm->rtm_protocol == RTPROT_REDIRECT)) { + ret = -1; + goto done; } - netlink_parse_rtattr(tb, RTA_MAX, RTM_RTA(rtm), len); - - if (rtm->rtm_flags & RTM_F_CLONED) - return 0; - if (rtm->rtm_protocol == RTPROT_REDIRECT) - return 0; + /* We don't care about change notifications for the MPLS table. */ + /* TODO: Revisit this. 
*/ + if (rtm->rtm_family == AF_MPLS) { + ret = -1; + goto done; + } - selfroute = is_selfroute(rtm->rtm_protocol); + dplane_ctx_set_ns_id(ctx, ns_id); - if (!startup && selfroute && h->nlmsg_type == RTM_NEWROUTE && - !zrouter.asic_offloaded && !ctx) { - if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("Route type: %d Received that we think we have originated, ignoring", - rtm->rtm_protocol); - return 0; + /* Parse attrs if necessary */ + if (tb_in != NULL) { + tb = tb_in; + } else { + netlink_parse_rtattr(tb_array, RTA_MAX, RTM_RTA(rtm), len); + tb = tb_array; } - /* We don't care about change notifications for the MPLS table. */ - /* TODO: Revisit this. */ - if (rtm->rtm_family == AF_MPLS) - return 0; + selfroute = is_selfroute(rtm->rtm_protocol); /* Table corresponding to route. */ if (tb[RTA_TABLE]) - table = *(int *)RTA_DATA(tb[RTA_TABLE]); + tableid = *(int *)RTA_DATA(tb[RTA_TABLE]); else - table = rtm->rtm_table; - - /* Map to VRF */ - vrf_id = zebra_vrf_lookup_by_table(table, ns_id); - if (vrf_id == VRF_DEFAULT) { - if (!is_zebra_valid_kernel_table(table) - && !is_zebra_main_routing_table(table)) - return 0; - } + tableid = rtm->rtm_table; + /* Map flags values */ if (rtm->rtm_flags & RTM_F_TRAP) flags |= ZEBRA_FLAG_TRAPPED; if (rtm->rtm_flags & RTM_F_OFFLOAD) @@ -836,7 +832,7 @@ int netlink_route_change_read_unicast_internal(struct nlmsghdr *h, if (h->nlmsg_flags & NLM_F_APPEND) flags |= ZEBRA_FLAG_OUTOFSYNC; - /* Route which inserted by Zebra. */ + /* Route which was inserted by Zebra. */ if (selfroute) { flags |= ZEBRA_FLAG_SELFROUTE; proto = proto2zebra(rtm->rtm_protocol, rtm->rtm_family, false); @@ -854,14 +850,18 @@ int netlink_route_change_read_unicast_internal(struct nlmsghdr *h, else src = anyaddr; - if (tb[RTA_PREFSRC]) + if (tb[RTA_PREFSRC]) { prefsrc = RTA_DATA(tb[RTA_PREFSRC]); + prefsrc_len = RTA_PAYLOAD(tb[RTA_PREFSRC]); + } - if (tb[RTA_GATEWAY]) + if (tb[RTA_GATEWAY]) { gate = RTA_DATA(tb[RTA_GATEWAY]); + gate_len = RTA_PAYLOAD(tb[RTA_GATEWAY]); + } if (tb[RTA_NH_ID]) - nhe_id = *(uint32_t *)RTA_DATA(tb[RTA_NH_ID]); + nhg_id = *(uint32_t *)RTA_DATA(tb[RTA_NH_ID]); if (tb[RTA_PRIORITY]) metric = *(int *)RTA_DATA(tb[RTA_PRIORITY]); @@ -887,7 +887,8 @@ int netlink_route_change_read_unicast_internal(struct nlmsghdr *h, zlog_err( "Invalid destination prefix length: %u received from kernel route change", rtm->rtm_dst_len); - return -1; + ret = -1; + goto done; } memcpy(&p.u.prefix4, dest, 4); p.prefixlen = rtm->rtm_dst_len; @@ -895,14 +896,16 @@ int netlink_route_change_read_unicast_internal(struct nlmsghdr *h, if (rtm->rtm_src_len != 0) { flog_warn( EC_ZEBRA_UNSUPPORTED_V4_SRCDEST, - "unsupported IPv4 sourcedest route (dest %pFX vrf %u)", - &p, vrf_id); - return 0; + "unsupported IPv4 sourcedest route (dest %pFX table %u)", + &p, tableid); + ret = -1; + goto done; } /* Force debug below to not display anything for source */ src_p.prefixlen = 0; } else if (rtm->rtm_family == AF_INET6) { + afi = AFI_IP6; p.family = AF_INET6; if (rtm->rtm_dst_len > IPV6_MAX_BITLEN) { zlog_err( @@ -920,14 +923,15 @@ int netlink_route_change_read_unicast_internal(struct nlmsghdr *h, rtm->rtm_src_len); return -1; } - memcpy(&src_p.prefix, src, 16); + memcpy(&src_p.u.prefix6, src, 16); src_p.prefixlen = rtm->rtm_src_len; } else { /* We only handle the AFs we handle... 
*/ if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug("%s: unknown address-family %u", __func__, rtm->rtm_family); - return 0; + ret = -1; + goto done; } /* @@ -956,6 +960,249 @@ int netlink_route_change_read_unicast_internal(struct nlmsghdr *h, char buf2[PREFIX_STRLEN]; zlog_debug( + "%s %pFX%s%s nsid: %u table_id: %u metric: %d Admin Distance: %d", + nl_msg_type_to_str(h->nlmsg_type), &p, + src_p.prefixlen ? " from " : "", + src_p.prefixlen ? prefix2str(&src_p, buf2, sizeof(buf2)) + : "", + ns_id, tableid, metric, distance); + } + + /* Set values in ctx. Note that vrf is not set, because we can only + * resolve the FRR vrf info in the main pthread. + */ + dplane_ctx_set_afi(ctx, afi); + dplane_ctx_set_safi(ctx, SAFI_UNICAST); + dplane_ctx_set_table(ctx, tableid); + dplane_ctx_set_vrf(ctx, VRF_UNKNOWN); + dplane_ctx_set_ns_id(ctx, ns_id); + dplane_ctx_set_dest(ctx, &p); + if (src_p.prefixlen > 0) + dplane_ctx_set_src(ctx, &src_p); + else + dplane_ctx_set_src(ctx, NULL); + dplane_ctx_set_type(ctx, proto); + dplane_ctx_set_flags(ctx, flags); + dplane_ctx_set_route_metric(ctx, metric); + dplane_ctx_set_route_mtu(ctx, mtu); + dplane_ctx_set_distance(ctx, distance); + dplane_ctx_set_tag(ctx, tag); + + dplane_ctx_set_ifindex(ctx, index); + dplane_ctx_set_route_bhtype(ctx, bh_type); + if (prefsrc) { + /* Convert to ipaddr */ + memset(&addr, 0, sizeof(addr)); + + if (afi == AFI_IP) { + SET_IPADDR_V4(&addr); + memcpy(&addr.ipaddr_v4, prefsrc, prefsrc_len); + } else { + SET_IPADDR_V6(&addr); + memcpy(&addr.ipaddr_v6, prefsrc, prefsrc_len); + } + + dplane_ctx_set_route_prefsrc(ctx, &addr); + } else { + dplane_ctx_set_route_prefsrc(ctx, NULL); + } + + if (gate) { + /* Convert to ipaddr */ + memset(&addr, 0, sizeof(addr)); + + if (afi == AFI_IP) { + SET_IPADDR_V4(&addr); + memcpy(&addr.ipaddr_v4, gate, gate_len); + } else { + SET_IPADDR_V6(&addr); + memcpy(&addr.ipaddr_v6, gate, gate_len); + } + + dplane_ctx_set_route_gw(ctx, &addr); + } + + if (nhg_id > 0) + dplane_ctx_set_nhg_id(ctx, nhg_id); + +done: + + return ret; +} + +/* + * Public api for use parsing a route notification message: this notification + * only parses the top-level route attributes, and doesn't include nexthops. + */ +int netlink_route_notify_read_ctx(struct nlmsghdr *h, ns_id_t ns_id, + struct zebra_dplane_ctx *ctx) +{ + /* Use the common parser for route-level netlink message info; + * we expect the caller to have set the context up with the correct + * dplane opcode, and we expect the caller to submit the resulting ctx + * for processing in zebra. + */ + return netlink_route_read_unicast_ctx(h, ns_id, NULL, ctx); +} + +/* + * Parse a route update netlink message, extract and validate its data, + * call into zebra with an update. 
+ */ +static int netlink_route_change_read_unicast_internal(struct nlmsghdr *h, + ns_id_t ns_id, int startup) +{ + int len; + struct rtmsg *rtm; + struct rtattr *tb[RTA_MAX + 1]; + uint32_t flags = 0; + struct prefix p; + struct prefix src_p = {}; + vrf_id_t vrf_id; + bool selfroute; + + int proto = ZEBRA_ROUTE_KERNEL; + int index = 0; + int table; + int metric = 0; + uint32_t mtu = 0; + uint8_t distance = 0; + route_tag_t tag = 0; + uint32_t nhe_id = 0; + void *gate = NULL; + const struct ipaddr *gate_addr; + void *prefsrc = NULL; /* IPv4 preferred source host address */ + const struct ipaddr *prefsrc_addr; + enum blackhole_type bh_type = BLACKHOLE_UNSPEC; + afi_t afi; + struct zebra_dplane_ctx *ctx = NULL; + int ret; + + frrtrace(3, frr_zebra, netlink_route_change_read_unicast, h, ns_id, + startup); + + rtm = NLMSG_DATA(h); + + if (startup && h->nlmsg_type != RTM_NEWROUTE) + return 0; + + switch (rtm->rtm_type) { + case RTN_UNICAST: + case RTN_BLACKHOLE: + case RTN_UNREACHABLE: + case RTN_PROHIBIT: + break; + default: + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug("Route rtm_type: %s(%d) intentionally ignoring", + nl_rttype_to_str(rtm->rtm_type), + rtm->rtm_type); + return 0; + } + + len = h->nlmsg_len - NLMSG_LENGTH(sizeof(struct rtmsg)); + if (len < 0) { + zlog_err( + "%s: Message received from netlink is of a broken size %d %zu", + __func__, h->nlmsg_len, + (size_t)NLMSG_LENGTH(sizeof(struct rtmsg))); + return -1; + } + + if (rtm->rtm_flags & RTM_F_CLONED) + return 0; + if (rtm->rtm_protocol == RTPROT_REDIRECT) + return 0; + + /* We don't care about change notifications for the MPLS table. */ + /* TODO: Revisit this. */ + if (rtm->rtm_family == AF_MPLS) + return 0; + + netlink_parse_rtattr(tb, RTA_MAX, RTM_RTA(rtm), len); + + /* + * Allocate a context object and parse the core parts of the route + * message. + * After this point, note that we need to 'goto done' to exit, + * so that the ctx gets cleaned-up. + */ + ctx = dplane_ctx_alloc(); + + dplane_ctx_route_init(ctx, + h->nlmsg_type == RTM_NEWROUTE ? + DPLANE_OP_ROUTE_INSTALL : + DPLANE_OP_ROUTE_DELETE, NULL, NULL); + + /* Finish parsing the core route info */ + ret = netlink_route_read_unicast_ctx(h, ns_id, tb, ctx); + if (ret < 0) { + ret = 0; + goto done; + } + + flags = dplane_ctx_get_flags(ctx); + + selfroute = CHECK_FLAG(flags, ZEBRA_FLAG_SELFROUTE); + + if (!startup && selfroute && h->nlmsg_type == RTM_NEWROUTE && + !zrouter.asic_offloaded) { + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug("Route type: %d Received that we think we have originated, ignoring", + rtm->rtm_protocol); + ret = 0; + goto done; + } + + /* Table corresponding to route. */ + table = dplane_ctx_get_table(ctx); + + /* Map to VRF: note that this can _only_ be done in the main pthread */ + vrf_id = zebra_vrf_lookup_by_table(table, ns_id); + if (vrf_id == VRF_DEFAULT) { + if (!is_zebra_valid_kernel_table(table) + && !is_zebra_main_routing_table(table)) { + ret = 0; + goto done; + } + } + + /* Route which inserted by Zebra. 
*/ + if (selfroute) + proto = dplane_ctx_get_type(ctx); + + index = dplane_ctx_get_ifindex(ctx); + + p = *(dplane_ctx_get_dest(ctx)); + + if (dplane_ctx_get_src(ctx) == NULL) + src_p.prefixlen = 0; + else + src_p = *(dplane_ctx_get_src(ctx)); + + prefsrc_addr = dplane_ctx_get_route_prefsrc(ctx); + if (prefsrc_addr) + prefsrc = (void *)&(prefsrc_addr->ip.addr); + + gate_addr = dplane_ctx_get_route_gw(ctx); + if (!IS_IPADDR_NONE(gate_addr)) + gate = (void *)&(gate_addr->ip.addr); + + nhe_id = dplane_ctx_get_nhe_id(ctx); + + metric = dplane_ctx_get_metric(ctx); + distance = dplane_ctx_get_distance(ctx); + tag = dplane_ctx_get_tag(ctx); + mtu = dplane_ctx_get_mtu(ctx); + + afi = dplane_ctx_get_afi(ctx); + + bh_type = dplane_ctx_get_route_bhtype(ctx); + + if (IS_ZEBRA_DEBUG_KERNEL) { + char buf2[PREFIX_STRLEN]; + + zlog_debug( "%s %pFX%s%s vrf %s(%u) table_id: %u metric: %d Admin Distance: %d", nl_msg_type_to_str(h->nlmsg_type), &p, src_p.prefixlen ? " from " : "", @@ -965,10 +1212,6 @@ int netlink_route_change_read_unicast_internal(struct nlmsghdr *h, distance); } - afi_t afi = AFI_IP; - if (rtm->rtm_family == AF_INET6) - afi = AFI_IP6; - if (h->nlmsg_type == RTM_NEWROUTE) { struct route_entry *re; struct nexthop_group *ng = NULL; @@ -1018,12 +1261,11 @@ int netlink_route_change_read_unicast_internal(struct nlmsghdr *h, } } if (nhe_id || ng) { - dplane_rib_add_multipath(afi, SAFI_UNICAST, &p, &src_p, - re, ng, startup, ctx); + rib_add_multipath(afi, SAFI_UNICAST, &p, + (struct prefix_ipv6 *)&src_p, + re, ng, startup); if (ng) nexthop_group_delete(&ng); - if (ctx) - zebra_rib_route_entry_free(re); } else { /* * I really don't see how this is possible @@ -1038,17 +1280,10 @@ int netlink_route_change_read_unicast_internal(struct nlmsghdr *h, zebra_rib_route_entry_free(re); } } else { - if (ctx) { - zlog_err( - "%s: %pFX RTM_DELROUTE received but received a context as well", - __func__, &p); - return 0; - } - if (nhe_id) { rib_delete(afi, SAFI_UNICAST, vrf_id, proto, 0, flags, - &p, &src_p, NULL, nhe_id, table, metric, - distance, true); + &p, (struct prefix_ipv6 *)&src_p, NULL, + nhe_id, table, metric, distance, true); } else { if (!tb[RTA_MULTIPATH]) { struct nexthop nh; @@ -1057,26 +1292,33 @@ int netlink_route_change_read_unicast_internal(struct nlmsghdr *h, ns_id, rtm, tb, bh_type, index, prefsrc, gate, afi, vrf_id); rib_delete(afi, SAFI_UNICAST, vrf_id, proto, 0, - flags, &p, &src_p, &nh, 0, table, - metric, distance, true); + flags, &p, + (struct prefix_ipv6 *)&src_p, &nh, 0, + table, metric, distance, true); } else { /* XXX: need to compare the entire list of * nexthops here for NLM_F_APPEND stupidity */ rib_delete(afi, SAFI_UNICAST, vrf_id, proto, 0, - flags, &p, &src_p, NULL, 0, table, - metric, distance, true); + flags, &p, + (struct prefix_ipv6 *)&src_p, NULL, 0, + table, metric, distance, true); } } } - return 1; + ret = 1; + +done: + if (ctx) + dplane_ctx_fini(&ctx); + + return ret; } static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id, int startup) { - return netlink_route_change_read_unicast_internal(h, ns_id, startup, - NULL); + return netlink_route_change_read_unicast_internal(h, ns_id, startup); } static struct mcast_route_data *mroute = NULL; @@ -1615,13 +1857,10 @@ static bool _netlink_route_build_singlepath(const struct prefix *p, { char label_buf[256]; - struct vrf *vrf; char addrstr[INET6_ADDRSTRLEN]; assert(nexthop); - vrf = vrf_lookup_by_id(nexthop->vrf_id); - if (!_netlink_route_encode_label_info(nexthop, nlmsg, req_size, rtmsg, label_buf, 
sizeof(label_buf))) return false; @@ -1782,10 +2021,10 @@ static bool _netlink_route_build_singlepath(const struct prefix *p, } if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("%s: 5549 (%s): %pFX nexthop via %s %s if %u vrf %s(%u)", + zlog_debug("%s: 5549 (%s): %pFX nexthop via %s %s if %u vrf %u", __func__, routedesc, p, ipv4_ll_buf, label_buf, nexthop->ifindex, - VRF_LOGNAME(vrf), nexthop->vrf_id); + nexthop->vrf_id); return true; } @@ -1808,10 +2047,9 @@ static bool _netlink_route_build_singlepath(const struct prefix *p, if (IS_ZEBRA_DEBUG_KERNEL) { inet_ntop(AF_INET, &nexthop->gate.ipv4, addrstr, sizeof(addrstr)); - zlog_debug("%s: (%s): %pFX nexthop via %s %s if %u vrf %s(%u)", + zlog_debug("%s: (%s): %pFX nexthop via %s %s if %u vrf %u", __func__, routedesc, p, addrstr, label_buf, - nexthop->ifindex, VRF_LOGNAME(vrf), - nexthop->vrf_id); + nexthop->ifindex, nexthop->vrf_id); } } @@ -1832,10 +2070,9 @@ static bool _netlink_route_build_singlepath(const struct prefix *p, if (IS_ZEBRA_DEBUG_KERNEL) { inet_ntop(AF_INET6, &nexthop->gate.ipv6, addrstr, sizeof(addrstr)); - zlog_debug("%s: (%s): %pFX nexthop via %s %s if %u vrf %s(%u)", + zlog_debug("%s: (%s): %pFX nexthop via %s %s if %u vrf %u", __func__, routedesc, p, addrstr, label_buf, - nexthop->ifindex, VRF_LOGNAME(vrf), - nexthop->vrf_id); + nexthop->ifindex, nexthop->vrf_id); } } @@ -1857,9 +2094,9 @@ static bool _netlink_route_build_singlepath(const struct prefix *p, } if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("%s: (%s): %pFX nexthop via if %u vrf %s(%u)", + zlog_debug("%s: (%s): %pFX nexthop via if %u vrf %u", __func__, routedesc, p, nexthop->ifindex, - VRF_LOGNAME(vrf), nexthop->vrf_id); + nexthop->vrf_id); } return true; @@ -1943,7 +2180,6 @@ static bool _netlink_route_build_multipath(const struct prefix *p, route_tag_t tag, bool fpm) { char label_buf[256]; - struct vrf *vrf; struct rtnexthop *rtnh; rtnh = nl_attr_rtnh(nlmsg, req_size); @@ -1952,8 +2188,6 @@ static bool _netlink_route_build_multipath(const struct prefix *p, assert(nexthop); - vrf = vrf_lookup_by_id(nexthop->vrf_id); - if (!_netlink_route_encode_label_info(nexthop, nlmsg, req_size, rtmsg, label_buf, sizeof(label_buf))) return false; @@ -1976,10 +2210,9 @@ static bool _netlink_route_build_multipath(const struct prefix *p, if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - "%s: 5549 (%s): %pFX nexthop via %s %s if %u vrf %s(%u)", + "%s: 5549 (%s): %pFX nexthop via %s %s if %u vrf %u", __func__, routedesc, p, ipv4_ll_buf, label_buf, - nexthop->ifindex, VRF_LOGNAME(vrf), - nexthop->vrf_id); + nexthop->ifindex, nexthop->vrf_id); nl_attr_rtnh_end(nlmsg, rtnh); return true; } @@ -1997,10 +2230,9 @@ static bool _netlink_route_build_multipath(const struct prefix *p, *src = &nexthop->src; if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("%s: (%s): %pFX nexthop via %pI4 %s if %u vrf %s(%u)", + zlog_debug("%s: (%s): %pFX nexthop via %pI4 %s if %u vrf %u", __func__, routedesc, p, &nexthop->gate.ipv4, - label_buf, nexthop->ifindex, - VRF_LOGNAME(vrf), nexthop->vrf_id); + label_buf, nexthop->ifindex, nexthop->vrf_id); } if (nexthop->type == NEXTHOP_TYPE_IPV6 || nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX) { @@ -2015,10 +2247,9 @@ static bool _netlink_route_build_multipath(const struct prefix *p, *src = &nexthop->src; if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("%s: (%s): %pFX nexthop via %pI6 %s if %u vrf %s(%u)", + zlog_debug("%s: (%s): %pFX nexthop via %pI6 %s if %u vrf %u", __func__, routedesc, p, &nexthop->gate.ipv6, - label_buf, nexthop->ifindex, - VRF_LOGNAME(vrf), nexthop->vrf_id); + label_buf, 
nexthop->ifindex, nexthop->vrf_id); } /* @@ -2037,9 +2268,9 @@ static bool _netlink_route_build_multipath(const struct prefix *p, *src = &nexthop->src; if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("%s: (%s): %pFX nexthop via if %u vrf %s(%u)", + zlog_debug("%s: (%s): %pFX nexthop via if %u vrf %u", __func__, routedesc, p, nexthop->ifindex, - VRF_LOGNAME(vrf), nexthop->vrf_id); + nexthop->vrf_id); } if (nexthop->weight) @@ -2357,10 +2588,10 @@ ssize_t netlink_route_multipath_msg_encode(int cmd, struct zebra_dplane_ctx *ctx } } - if ((!fpm && kernel_nexthops_supported() - && (!proto_nexthops_only() - || is_proto_nhg(dplane_ctx_get_nhe_id(ctx), 0))) - || (fpm && force_nhg)) { + if ((!fpm && kernel_nexthops_supported() && + (!proto_nexthops_only() || is_proto_nhg(dplane_ctx_get_nhe_id(ctx), 0)) && + (!src_p || !src_p->prefixlen)) || + (fpm && force_nhg)) { /* Kernel supports nexthop objects */ if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug("%s: %pFX nhg_id is %u", __func__, p, @@ -3057,9 +3288,8 @@ ssize_t netlink_nexthop_msg_encode(uint16_t cmd, nexthop_done: if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug("%s: ID (%u): %pNHv(%d) vrf %s(%u) %s ", + zlog_debug("%s: ID (%u): %pNHv(%d) vrf %u %s ", __func__, id, nh, nh->ifindex, - vrf_id_to_name(nh->vrf_id), nh->vrf_id, label_buf); } diff --git a/zebra/rt_netlink.h b/zebra/rt_netlink.h index d51944f1a4..1c113baee6 100644 --- a/zebra/rt_netlink.h +++ b/zebra/rt_netlink.h @@ -64,6 +64,15 @@ extern ssize_t netlink_macfdb_update_ctx(struct zebra_dplane_ctx *ctx, extern int netlink_route_change(struct nlmsghdr *h, ns_id_t ns_id, int startup); extern int netlink_route_read(struct zebra_ns *zns); +/* + * Public api for parsing a route notification message: this notification + * only parses the top-level route attributes, and doesn't include nexthops. + * FPM, for example, is a user. + * Returns <0 if the message should be ignored/skipped. 
+ */ +int netlink_route_notify_read_ctx(struct nlmsghdr *h, ns_id_t ns_id, + struct zebra_dplane_ctx *ctx); + extern int netlink_nexthop_change(struct nlmsghdr *h, ns_id_t ns_id, int startup); extern int netlink_nexthop_read(struct zebra_ns *zns); @@ -109,10 +118,6 @@ netlink_put_lsp_update_msg(struct nl_batch *bth, struct zebra_dplane_ctx *ctx); extern enum netlink_msg_status netlink_put_pw_update_msg(struct nl_batch *bth, struct zebra_dplane_ctx *ctx); -int netlink_route_change_read_unicast_internal(struct nlmsghdr *h, - ns_id_t ns_id, int startup, - struct zebra_dplane_ctx *ctx); - #ifdef NETLINK_DEBUG const char *nlmsg_type2str(uint16_t type); const char *af_type2str(int type); diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c index ab55998af0..e9d554ba3d 100644 --- a/zebra/zapi_msg.c +++ b/zebra/zapi_msg.c @@ -509,9 +509,8 @@ int zsend_interface_update(int cmd, struct zserv *client, struct interface *ifp) return zserv_send_message(client, s); } -int zsend_redistribute_route(int cmd, struct zserv *client, - const struct route_node *rn, - const struct route_entry *re, bool is_table_direct) +int zsend_redistribute_route(int cmd, struct zserv *client, const struct route_node *rn, + const struct route_entry *re, vrf_id_t *to_vrf) { struct zapi_route api; struct zapi_nexthop *api_nh; @@ -527,9 +526,10 @@ int zsend_redistribute_route(int cmd, struct zserv *client, api.vrf_id = re->vrf_id; api.type = re->type; api.safi = SAFI_UNICAST; - if (is_table_direct) { + if (to_vrf != NULL) { api.instance = re->table; api.type = ZEBRA_ROUTE_TABLE_DIRECT; + api.vrf_id = *to_vrf; } else api.instance = re->instance; api.flags = re->flags; @@ -598,7 +598,7 @@ int zsend_redistribute_route(int cmd, struct zserv *client, /* Attributes. */ SET_FLAG(api.message, ZAPI_MESSAGE_DISTANCE); - if (is_table_direct) + if (to_vrf != NULL) api.distance = ZEBRA_TABLEDIRECT_DISTANCE_DEFAULT; else api.distance = re->distance; @@ -740,6 +740,10 @@ static int route_notify_internal(const struct route_node *rn, int type, struct zserv *client; struct stream *s; uint8_t blen; + const struct prefix *p, *src_p; + struct prefix src_dummy = {}; + + srcdest_rnode_prefixes(rn, &p, &src_p); client = zserv_find_client(type, instance); if (!client || !client->notify_owner) { @@ -771,9 +775,17 @@ static int route_notify_internal(const struct route_node *rn, int type, stream_putc(s, rn->p.family); - blen = prefix_blen(&rn->p); - stream_putc(s, rn->p.prefixlen); - stream_put(s, &rn->p.u.prefix, blen); + blen = prefix_blen(p); + stream_putc(s, p->prefixlen); + stream_put(s, &p->u.prefix, blen); + + if (!src_p) { + src_dummy.family = p->family; + src_p = &src_dummy; + } + blen = prefix_blen(src_p); + stream_putc(s, src_p->prefixlen); + stream_put(s, &src_p->u.prefix, blen); stream_putl(s, table_id); diff --git a/zebra/zapi_msg.h b/zebra/zapi_msg.h index a59ccc838b..29a5b69f18 100644 --- a/zebra/zapi_msg.h +++ b/zebra/zapi_msg.h @@ -51,10 +51,8 @@ extern void nbr_connected_delete_ipv6(struct interface *ifp, struct in6_addr *address); extern int zsend_interface_update(int cmd, struct zserv *client, struct interface *ifp); -extern int zsend_redistribute_route(int cmd, struct zserv *zclient, - const struct route_node *rn, - const struct route_entry *re, - bool is_table_direct); +extern int zsend_redistribute_route(int cmd, struct zserv *zclient, const struct route_node *rn, + const struct route_entry *re, vrf_id_t *to_vrf); extern int zsend_router_id_update(struct zserv *zclient, afi_t afi, struct prefix *p, vrf_id_t vrf_id); diff --git 
a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c index 88c1a04938..b57c930154 100644 --- a/zebra/zebra_dplane.c +++ b/zebra/zebra_dplane.c @@ -150,6 +150,11 @@ struct dplane_route_info { /* Optional list of extra interface info */ struct dplane_intf_extra_list_head intf_extra_list; + + /* Route-level info that aligns with some netlink route data */ + enum blackhole_type zd_bh_type; + struct ipaddr zd_prefsrc; + struct ipaddr zd_gateway; }; /* @@ -1906,6 +1911,12 @@ void dplane_ctx_set_flags(struct zebra_dplane_ctx *ctx, uint32_t flags) ctx->u.rinfo.zd_flags = flags; } +void dplane_ctx_set_route_metric(struct zebra_dplane_ctx *ctx, uint32_t metric) +{ + DPLANE_CTX_VALID(ctx); + ctx->u.rinfo.zd_metric = metric; +} + uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx) { DPLANE_CTX_VALID(ctx); @@ -1927,6 +1938,12 @@ uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx) return ctx->u.rinfo.zd_mtu; } +void dplane_ctx_set_route_mtu(struct zebra_dplane_ctx *ctx, uint32_t mtu) +{ + DPLANE_CTX_VALID(ctx); + ctx->u.rinfo.zd_mtu = mtu; +} + uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx) { DPLANE_CTX_VALID(ctx); @@ -1955,6 +1972,58 @@ uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx) return ctx->u.rinfo.zd_old_distance; } +/* Route blackhole type */ +enum blackhole_type dplane_ctx_get_route_bhtype(const struct zebra_dplane_ctx *ctx) +{ + DPLANE_CTX_VALID(ctx); + return ctx->u.rinfo.zd_bh_type; +} + +void dplane_ctx_set_route_bhtype(struct zebra_dplane_ctx *ctx, + enum blackhole_type bhtype) +{ + DPLANE_CTX_VALID(ctx); + ctx->u.rinfo.zd_bh_type = bhtype; +} + +/* IP 'preferred source', at route-level */ +const struct ipaddr *dplane_ctx_get_route_prefsrc(const struct zebra_dplane_ctx *ctx) +{ + DPLANE_CTX_VALID(ctx); + + if (ctx->u.rinfo.zd_prefsrc.ipa_type != 0) + return &(ctx->u.rinfo.zd_prefsrc); + else + return NULL; +} + +void dplane_ctx_set_route_prefsrc(struct zebra_dplane_ctx *ctx, + const struct ipaddr *addr) +{ + DPLANE_CTX_VALID(ctx); + if (addr) + ctx->u.rinfo.zd_prefsrc = *addr; + else + memset(&ctx->u.rinfo.zd_prefsrc, 0, + sizeof(ctx->u.rinfo.zd_prefsrc)); +} + +/* Route-level 'gateway' */ +const struct ipaddr *dplane_ctx_get_route_gw(const struct zebra_dplane_ctx *ctx) +{ + DPLANE_CTX_VALID(ctx); + return &(ctx->u.rinfo.zd_gateway); +} + +void dplane_ctx_set_route_gw(struct zebra_dplane_ctx *ctx, const struct ipaddr *gw) +{ + DPLANE_CTX_VALID(ctx); + if (gw) + ctx->u.rinfo.zd_gateway = *gw; + else + memset(&ctx->u.rinfo.zd_gateway, 0, sizeof(ctx->u.rinfo.zd_gateway)); +} + int dplane_ctx_tc_qdisc_get_kind(const struct zebra_dplane_ctx *ctx) { DPLANE_CTX_VALID(ctx); @@ -2179,6 +2248,12 @@ uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx *ctx) return ctx->u.rinfo.zd_nhg_id; } +void dplane_ctx_set_nhg_id(struct zebra_dplane_ctx *ctx, uint32_t nhgid) +{ + DPLANE_CTX_VALID(ctx); + ctx->u.rinfo.zd_nhg_id = nhgid; +} + const struct nexthop_group *dplane_ctx_get_ng( const struct zebra_dplane_ctx *ctx) { @@ -6923,20 +6998,6 @@ kernel_dplane_process_ipset_entry(struct zebra_dplane_provider *prov, dplane_provider_enqueue_out_ctx(prov, ctx); } -void dplane_rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, - struct prefix_ipv6 *src_p, struct route_entry *re, - struct nexthop_group *ng, int startup, - struct zebra_dplane_ctx *ctx) -{ - if (!ctx) - rib_add_multipath(afi, safi, p, src_p, re, ng, startup); - else { - dplane_ctx_route_init_basic(ctx, dplane_ctx_get_op(ctx), re, p, - src_p, afi, safi); - 
dplane_provider_enqueue_to_zebra(ctx); - } -} - /* * Kernel provider callback */ diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h index 285b00c9b7..cabc70c232 100644 --- a/zebra/zebra_dplane.h +++ b/zebra/zebra_dplane.h @@ -524,11 +524,26 @@ uint32_t dplane_ctx_get_flags(const struct zebra_dplane_ctx *ctx); void dplane_ctx_set_flags(struct zebra_dplane_ctx *ctx, uint32_t flags); uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx); uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx); +void dplane_ctx_set_route_metric(struct zebra_dplane_ctx *ctx, uint32_t metric); +void dplane_ctx_set_route_mtu(struct zebra_dplane_ctx *ctx, uint32_t mtu); uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx); uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx); uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx); void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance); uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx); +/* Route blackhole type */ +enum blackhole_type dplane_ctx_get_route_bhtype( + const struct zebra_dplane_ctx *ctx); +void dplane_ctx_set_route_bhtype(struct zebra_dplane_ctx *ctx, + enum blackhole_type bhtype); +/* IPv4 'preferred source', at route-level */ +const struct ipaddr *dplane_ctx_get_route_prefsrc( + const struct zebra_dplane_ctx *ctx); +void dplane_ctx_set_route_prefsrc(struct zebra_dplane_ctx *ctx, + const struct ipaddr *addr); +/* Route 'gateway', at route-level */ +const struct ipaddr *dplane_ctx_get_route_gw(const struct zebra_dplane_ctx *ctx); +void dplane_ctx_set_route_gw(struct zebra_dplane_ctx *ctx, const struct ipaddr *gw); /* Accessors for traffic control context */ int dplane_ctx_tc_qdisc_get_kind(const struct zebra_dplane_ctx *ctx); @@ -572,6 +587,7 @@ void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh); void dplane_ctx_set_backup_nhg(struct zebra_dplane_ctx *ctx, const struct nexthop_group *nhg); +void dplane_ctx_set_nhg_id(struct zebra_dplane_ctx *ctx, uint32_t nhgid); uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx *ctx); const struct nexthop_group *dplane_ctx_get_ng( const struct zebra_dplane_ctx *ctx); @@ -1256,16 +1272,6 @@ void zebra_dplane_shutdown(void); void zebra_dplane_startup_stage(struct zebra_ns *zns, enum zebra_dplane_startup_notifications spot); -/* - * decision point for sending a routing update through the old - * straight to zebra master pthread or through the dplane to - * the master pthread for handling - */ -void dplane_rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, - struct prefix_ipv6 *src_p, struct route_entry *re, - struct nexthop_group *ng, int startup, - struct zebra_dplane_ctx *ctx); - enum zebra_dplane_startup_notifications dplane_ctx_get_startup_spot(struct zebra_dplane_ctx *ctx); diff --git a/zebra/zebra_nb_config.c b/zebra/zebra_nb_config.c index ec151360bd..d99010547f 100644 --- a/zebra/zebra_nb_config.c +++ b/zebra/zebra_nb_config.c @@ -3358,10 +3358,7 @@ int lib_vrf_zebra_filter_protocol_create(struct nb_cb_create_args *args) const char *proto = yang_dnode_get_string(args->dnode, "protocol"); int rtype; - if (strcasecmp(proto, "any") == 0) - rtype = ZEBRA_ROUTE_MAX; - else - rtype = proto_name2num(proto); + rtype = proto_name2num(proto); if (args->event == NB_EV_VALIDATE) if (rtype < 0) { @@ -3387,10 +3384,7 @@ int lib_vrf_zebra_filter_protocol_destroy(struct nb_cb_destroy_args *args) yang_afi_safi_identity2value(afi_safi, &afi, &safi); - if 
(strcasecmp(proto, "any") == 0) - rtype = ZEBRA_ROUTE_MAX; - else - rtype = proto_name2num(proto); + rtype = proto_name2num(proto); /* deleting an existing entry, it can't be invalid */ assert(rtype >= 0); @@ -3418,10 +3412,7 @@ void lib_vrf_zebra_filter_protocol_apply_finish( yang_afi_safi_identity2value(afi_safi, &afi, &safi); - if (strcasecmp(proto, "any") == 0) - rtype = ZEBRA_ROUTE_MAX; - else - rtype = proto_name2num(proto); + rtype = proto_name2num(proto); /* finishing apply for a validated entry, it can't be invalid */ assert(rtype >= 0); diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c index a32fc2bb14..f5141c8f23 100644 --- a/zebra/zebra_nhg.c +++ b/zebra/zebra_nhg.c @@ -572,8 +572,7 @@ bool zebra_nhg_hash_equal(const void *arg1, const void *arg2) /* Nexthops should be in-order, so we simply compare them in-place */ for (nexthop1 = nhe1->nhg.nexthop, nexthop2 = nhe2->nhg.nexthop; nexthop1 && nexthop2; - nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) { - + nexthop1 = nexthop_next(nexthop1), nexthop2 = nexthop_next(nexthop2)) { if (!nhg_compare_nexthops(nexthop1, nexthop2)) return false; } @@ -608,8 +607,7 @@ bool zebra_nhg_hash_equal(const void *arg1, const void *arg2) for (nexthop1 = nhe1->backup_info->nhe->nhg.nexthop, nexthop2 = nhe2->backup_info->nhe->nhg.nexthop; nexthop1 && nexthop2; - nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) { - + nexthop1 = nexthop_next(nexthop1), nexthop2 = nexthop_next(nexthop2)) { if (!nhg_compare_nexthops(nexthop1, nexthop2)) return false; } @@ -1762,7 +1760,8 @@ void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe) nhe->refcnt--; if (!zebra_router_in_shutdown() && nhe->refcnt <= 0 && - CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED) && + (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED) || + CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED)) && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND)) { nhe->refcnt = 1; SET_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND); @@ -3089,7 +3088,7 @@ static struct nhg_hash_entry *zebra_nhg_rib_compare_old_nhe( int nexthop_active_update(struct route_node *rn, struct route_entry *re, struct route_entry *old_re) { - struct nhg_hash_entry *curr_nhe; + struct nhg_hash_entry *curr_nhe, *remove; uint32_t curr_active = 0, backup_active = 0; if (PROTO_OWNED(re->nhe)) @@ -3143,16 +3142,25 @@ backups_done: new_nhe = zebra_nhg_rib_find_nhe(curr_nhe, rt_afi); - if (old_re && old_re->type == re->type && - old_re->instance == re->instance) + remove = new_nhe; + + if (old_re && old_re->type == re->type && old_re->instance == re->instance && + new_nhe != old_re->nhe) new_nhe = zebra_nhg_rib_compare_old_nhe(rn, re, new_nhe, old_re->nhe); if (IS_ZEBRA_DEBUG_NHG_DETAIL) - zlog_debug( - "%s: re %p CHANGED: nhe %p (%pNG) => new_nhe %p (%pNG)", - __func__, re, re->nhe, re->nhe, new_nhe, - new_nhe); + zlog_debug("%s: re %p CHANGED: nhe %p (%pNG) => new_nhe %p (%pNG) rib_find_nhe returned %p (%pNG) refcnt: %d", + __func__, re, re->nhe, re->nhe, new_nhe, new_nhe, remove, remove, + remove ? 
remove->refcnt : 0); + + /* + * if the results from zebra_nhg_rib_find_nhe is being + * dropped and it was generated in that function + * (refcnt of 0) then we know we can clean it up + */ + if (remove && remove != new_nhe && remove != re->nhe && remove->refcnt == 0) + zebra_nhg_handle_uninstall(remove); route_entry_update_nhe(re, new_nhe); } @@ -3373,7 +3381,17 @@ void zebra_nhg_install_kernel(struct nhg_hash_entry *nhe, uint8_t type) void zebra_nhg_uninstall_kernel(struct nhg_hash_entry *nhe) { - if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)) { + /* + * Clearly if the nexthop group is installed we should + * remove it. Additionally If the nexthop is already + * QUEUED for installation, we should also just send + * a deletion down as well. We cannot necessarily pluck + * the installation out of the queue ( since it may have + * already been acted on, but not processed yet in the + * main pthread ). + */ + if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED) || + CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED)) { int ret = dplane_nexthop_delete(nhe); switch (ret) { @@ -3445,7 +3463,13 @@ void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx) ZAPI_NHG_INSTALLED); break; case ZEBRA_DPLANE_REQUEST_FAILURE: - UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED); + /* + * With a request failure it is unknown what we now know + * this is because Zebra has lost track of whether or not + * any previous versions of this NHG are in the kernel + * or even what those versions were. So at this point + * we cannot unset the INSTALLED flag. + */ /* If daemon nhg, send it an update */ if (PROTO_OWNED(nhe)) zsend_nhg_notify(nhe->type, nhe->zapi_instance, @@ -3909,7 +3933,14 @@ void zebra_interface_nhg_reinstall(struct interface *ifp) __func__, ifp->name); frr_each (nhg_connected_tree, &zif->nhg_dependents, rb_node_dep) { + /* + * The nexthop associated with this was set as !ACTIVE + * so we need to turn it back to active when we get to + * this point again + */ + SET_FLAG(rb_node_dep->nhe->nhg.nexthop->flags, NEXTHOP_FLAG_ACTIVE); nh = rb_node_dep->nhe->nhg.nexthop; + if (zebra_nhg_set_valid_if_active(rb_node_dep->nhe)) { if (IS_ZEBRA_DEBUG_NHG_DETAIL) zlog_debug( diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c index 0226c355c8..2881192eb7 100644 --- a/zebra/zebra_rib.c +++ b/zebra/zebra_rib.c @@ -2220,8 +2220,20 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) bool fib_changed = false; bool debug_p = IS_ZEBRA_DEBUG_DPLANE | IS_ZEBRA_DEBUG_RIB; int start_count, end_count; + vrf_id_t vrf_id; + int tableid; + + /* Locate vrf and route table - we must have one or the other */ + tableid = dplane_ctx_get_table(ctx); + vrf_id = dplane_ctx_get_vrf(ctx); + if (vrf_id == VRF_UNKNOWN) + vrf_id = zebra_vrf_lookup_by_table(tableid, + dplane_ctx_get_ns_id(ctx)); + else if (tableid == ZEBRA_ROUTE_TABLE_UNKNOWN) + tableid = zebra_vrf_lookup_tableid(vrf_id, + dplane_ctx_get_ns_id(ctx)); - vrf = vrf_lookup_by_id(dplane_ctx_get_vrf(ctx)); + vrf = vrf_lookup_by_id(vrf_id); /* Locate rn and re(s) from ctx */ rn = rib_find_rn_from_ctx(ctx); @@ -2230,7 +2242,7 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) zlog_debug( "Failed to process dplane notification: no routes for %s(%u:%u):%pRN", VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), - dplane_ctx_get_table(ctx), rn); + tableid, rn); } goto done; } @@ -2240,7 +2252,7 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) if (debug_p) zlog_debug("%s(%u:%u):%pRN Processing dplane notif ctx %p", VRF_LOGNAME(vrf), 
dplane_ctx_get_vrf(ctx), - dplane_ctx_get_table(ctx), rn, ctx); + tableid, rn, ctx); /* * Take a pass through the routes, look for matches with the context @@ -2257,7 +2269,7 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) zlog_debug( "%s(%u:%u):%pRN Unable to process dplane notification: no entry for type %s", VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), - dplane_ctx_get_table(ctx), rn, + tableid, rn, zebra_route_string(dplane_ctx_get_type(ctx))); goto done; @@ -2293,7 +2305,7 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) "%s(%u:%u):%pRN dplane notif, uninstalled type %s route", VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), - dplane_ctx_get_table(ctx), rn, + tableid, rn, zebra_route_string( dplane_ctx_get_type(ctx))); } else { @@ -2303,7 +2315,7 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) "%s(%u:%u):%pRN dplane notif, but type %s not selected_fib", VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), - dplane_ctx_get_table(ctx), rn, + tableid, rn, zebra_route_string( dplane_ctx_get_type(ctx))); } @@ -2342,7 +2354,7 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) zlog_debug( "%s(%u:%u):%pRN dplane notification: rib_update returns FALSE", VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), - dplane_ctx_get_table(ctx), rn); + tableid, rn); } /* @@ -2361,7 +2373,7 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) "%s(%u:%u):%pRN applied nexthop changes from dplane notification", VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), - dplane_ctx_get_table(ctx), rn); + tableid, rn); /* Changed nexthops - update kernel/others */ dplane_route_notif_update(rn, re, @@ -2373,7 +2385,7 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) "%s(%u:%u):%pRN installed transition from dplane notification", VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), - dplane_ctx_get_table(ctx), rn); + tableid, rn); /* We expect this to be the selected route, so we want * to tell others about this transition. @@ -2393,7 +2405,7 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx) "%s(%u:%u):%pRN un-installed transition from dplane notification", VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx), - dplane_ctx_get_table(ctx), rn); + tableid, rn); /* Transition from _something_ installed to _nothing_ * installed. 
@@ -3973,10 +3985,10 @@ static void rib_link(struct route_node *rn, struct route_entry *re, int process) dest = rib_dest_from_rnode(rn); if (!dest) { + dest = zebra_rib_create_dest(rn); + if (IS_ZEBRA_DEBUG_RIB_DETAILED) rnode_debug(rn, re->vrf_id, "rn %p adding dest", rn); - - dest = zebra_rib_create_dest(rn); } re_list_add_head(&dest->routes, re); diff --git a/zebra/zebra_routemap.c b/zebra/zebra_routemap.c index 29bbf6023d..73ffa09c16 100644 --- a/zebra/zebra_routemap.c +++ b/zebra/zebra_routemap.c @@ -114,11 +114,6 @@ static void show_vrf_proto_rm(struct vty *vty, struct zebra_vrf *zvrf, vty_out(vty, "%-24s : none\n", zebra_route_string(i)); } - if (PROTO_RM_NAME(zvrf, af_type, i)) - vty_out(vty, "%-24s : %-10s\n", "any", - PROTO_RM_NAME(zvrf, af_type, i)); - else - vty_out(vty, "%-24s : none\n", "any"); } static void show_vrf_nht_rm(struct vty *vty, struct zebra_vrf *zvrf, @@ -1222,8 +1217,8 @@ route_map_result_t zebra_route_map_check(afi_t family, struct route_entry *re, return RMAP_DENYMATCH; } if (!rmap) { - rm_name = PROTO_RM_NAME(zvrf, family, ZEBRA_ROUTE_MAX); - rmap = PROTO_RM_MAP(zvrf, family, ZEBRA_ROUTE_MAX); + rm_name = PROTO_RM_NAME(zvrf, family, ZEBRA_ROUTE_ALL); + rmap = PROTO_RM_MAP(zvrf, family, ZEBRA_ROUTE_ALL); if (rm_name && !rmap) return RMAP_DENYMATCH; diff --git a/zebra/zebra_srv6.c b/zebra/zebra_srv6.c index 624f60e815..6d228c5e24 100644 --- a/zebra/zebra_srv6.c +++ b/zebra/zebra_srv6.c @@ -1547,9 +1547,26 @@ static int get_srv6_sid_explicit(struct zebra_srv6_sid **sid, } if (ctx->behavior == ZEBRA_SEG6_LOCAL_ACTION_END) { - zlog_err("%s: invalid SM request arguments: explicit SID allocation not allowed for End/uN behavior", - __func__); - return -1; + zctx = zebra_srv6_sid_ctx_alloc(); + zctx->ctx = *ctx; + + *sid = zebra_srv6_sid_alloc(zctx, sid_value, locator, block, sid_func, + SRV6_SID_ALLOC_MODE_EXPLICIT); + if (!(*sid)) { + flog_err(EC_ZEBRA_SM_CANNOT_ASSIGN_SID, + "%s: failed to create SRv6 SID %s (%pI6)", __func__, + srv6_sid_ctx2str(buf, sizeof(buf), ctx), sid_value); + return -1; + } + (*sid)->ctx = zctx; + zctx->sid = *sid; + listnode_add(srv6->sids, zctx); + + if (IS_ZEBRA_DEBUG_SRV6) + zlog_debug("%s: allocated explicit SRv6 SID %pI6 for context %s", __func__, + &(*sid)->value, srv6_sid_ctx2str(buf, sizeof(buf), ctx)); + + return 1; } /* Allocate an explicit SID function for the SID */ diff --git a/zebra/zebra_vrf.c b/zebra/zebra_vrf.c index 2b3cfc8766..7bfe07b4cf 100644 --- a/zebra/zebra_vrf.c +++ b/zebra/zebra_vrf.c @@ -98,6 +98,14 @@ static int zebra_vrf_new(struct vrf *vrf) zvrf = zebra_vrf_alloc(vrf); if (!vrf_is_backend_netns()) zvrf->zns = zebra_ns_lookup(NS_DEFAULT); + else if (vrf->vrf_id == VRF_DEFAULT) { + struct ns *ns; + + strlcpy(vrf->data.l.netns_name, VRF_DEFAULT_NAME, NS_NAMSIZ); + ns = ns_lookup(NS_DEFAULT); + ns->vrf_ctxt = vrf; + vrf->ns_ctxt = ns; + } otable_init(&zvrf->other_tables); @@ -417,6 +425,25 @@ vrf_id_t zebra_vrf_lookup_by_table(uint32_t table_id, ns_id_t ns_id) return VRF_DEFAULT; } +/* + * Lookup tableid by vrfid; handle vrf-lite and vrf-netns cases + */ +int zebra_vrf_lookup_tableid(vrf_id_t vrf_id, ns_id_t ns_id) +{ + struct zebra_vrf *zvrf; + + /* Handle vrf-lite and vrf-netns */ + if (vrf_is_backend_netns()) + zvrf = vrf_info_lookup(ns_id); + else + zvrf = vrf_info_lookup(vrf_id); + + if (zvrf) + return zvrf->table_id; + else + return ZEBRA_ROUTE_TABLE_UNKNOWN; +} + /* Lookup VRF by identifier. 
*/ struct zebra_vrf *zebra_vrf_lookup_by_id(vrf_id_t vrf_id) { diff --git a/zebra/zebra_vrf.h b/zebra/zebra_vrf.h index f97138c811..334bb93684 100644 --- a/zebra/zebra_vrf.h +++ b/zebra/zebra_vrf.h @@ -24,6 +24,8 @@ FRR_CFG_DEFAULT_BOOL(ZEBRA_IP_NHT_RESOLVE_VIA_DEFAULT, { .val_bool = false }, ); +#define ZEBRA_ROUTE_TABLE_UNKNOWN 0 + /* MPLS (Segment Routing) global block */ struct mpls_srgb { uint32_t start_label; @@ -247,6 +249,7 @@ extern struct zebra_vrf *zebra_vrf_lookup_by_name(const char *); extern vrf_id_t zebra_vrf_lookup_by_table(uint32_t table_id, ns_id_t ns_id); extern struct zebra_vrf *zebra_vrf_alloc(struct vrf *vrf); extern struct route_table *zebra_vrf_table(afi_t, safi_t, vrf_id_t); +int zebra_vrf_lookup_tableid(vrf_id_t vrf_id, ns_id_t ns_id); /* * API to associate a VRF with a NETNS. diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c index 582d15627c..a1731712d3 100644 --- a/zebra/zebra_vty.c +++ b/zebra/zebra_vty.c @@ -1789,9 +1789,24 @@ DEFPY (show_route_detail, rib_dest_t *dest; bool network_found = false; bool show_ng = !!ng; + int idx = 0; + + /* + * Return error if V6 address/prefix is passed as an argument to + * "show ip route" cmd. + * + * When "show ip route <X:X::X:X|X:X::X:X/M>" is queried, + * argv[idx]->text will be set to "ipv6" but argv[idx]->arg will be set + * to "ip". + */ + if (argv_find(argv, argc, "ipv6", &idx) && !strcmp(argv[idx]->arg, "ip")) { + vty_out(vty, "%% Cannot specify IPv6 address/prefix for IPv4 table\n"); + return CMD_WARNING; + } if (address_str) prefix_str = address_str; + if (str2prefix(prefix_str, &p) < 0) { vty_out(vty, "%% Malformed address\n"); return CMD_WARNING; |
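The rt_netlink.c hunks above split unicast route parsing into two stages: netlink_route_read_unicast_ctx() captures route-level data (table id, dest/src prefixes, metric, distance, tag, MTU, preferred source, gateway, blackhole type, nexthop-group id) into a zebra_dplane_ctx without touching VRF state, and the consumer running in the main pthread later maps the table id to a VRF via zebra_vrf_lookup_by_table() before calling rib_add_multipath() or rib_delete(). The sketch below only models that parse-then-resolve pattern in isolation; it is illustrative, with hypothetical minimal types and helper names, and does not use the actual dplane context API.

/*
 * Illustrative sketch only -- not FRR code. Stage 1 parses a (already
 * validated) kernel message into a context object without consulting VRF
 * state; stage 2, in the thread that owns the VRF tables, resolves the
 * VRF from the table id and consumes the context. All names hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct route_ctx {            /* stands in for struct zebra_dplane_ctx */
	uint32_t table_id;
	uint32_t metric;
	uint8_t distance;
	int vrf_id;           /* -1 == unknown until stage 2 */
};

/* Stage 1: fill the context from the decoded message fields. */
static int parse_route_msg(uint32_t table_id, uint32_t metric,
			   uint8_t distance, struct route_ctx *ctx)
{
	ctx->table_id = table_id;
	ctx->metric = metric;
	ctx->distance = distance;
	ctx->vrf_id = -1;     /* VRF lookup is deferred */
	return 0;
}

/* Stage 2: resolve the VRF in the thread that owns the VRF tables. */
static int lookup_vrf_by_table(uint32_t table_id)
{
	return table_id == 254 ? 0 : (int)table_id; /* toy mapping */
}

int main(void)
{
	struct route_ctx ctx;

	if (parse_route_msg(254, 20, 0, &ctx) == 0) {
		ctx.vrf_id = lookup_vrf_by_table(ctx.table_id);
		printf("table %u -> vrf %d metric %u\n", ctx.table_id,
		       ctx.vrf_id, ctx.metric);
	}
	return 0;
}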

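Related to the zapi_msg.c hunk, route_notify_internal() now writes both the destination prefix and the srcdest source prefix to the notification stream, substituting an empty prefix of the same family when the route has no source. The standalone sketch below mirrors that wire layout only; the struct and buffer helpers here are invented for illustration and are not the zapi stream API.

/*
 * Illustrative sketch of the notify encoding pattern: destination prefix
 * followed by a source prefix, with a zero-length dummy of the same
 * family when no srcdest source exists. Layout and names are made up.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct pfx {
	uint8_t family;     /* 4 or 6 in this sketch */
	uint8_t prefixlen;
	uint8_t addr[16];
};

static size_t put_prefix(uint8_t *buf, const struct pfx *p)
{
	size_t blen = (p->family == 4) ? 4 : 16;

	buf[0] = p->prefixlen;
	memcpy(buf + 1, p->addr, blen);
	return 1 + blen;
}

static size_t encode_notify(uint8_t *buf, const struct pfx *dest,
			    const struct pfx *src)
{
	struct pfx dummy = { .family = dest->family, .prefixlen = 0 };
	size_t off = 0;

	buf[off++] = dest->family;
	off += put_prefix(buf + off, dest);
	/* Always emit a source prefix; empty when none is present. */
	off += put_prefix(buf + off, src ? src : &dummy);
	return off;
}

int main(void)
{
	uint8_t buf[64];
	struct pfx dest = { .family = 6, .prefixlen = 64 };
	size_t len = encode_notify(buf, &dest, NULL);

	printf("encoded %zu bytes\n", len);
	return 0;
}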